bitkeeper revision 1.1564 (4295ecb2jzOPE0em5dg6Hu_4rbzFCg)
author kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Thu, 26 May 2005 15:35:14 +0000 (15:35 +0000)
committer kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Thu, 26 May 2005 15:35:14 +0000 (15:35 +0000)
Port CPU setup code from Linux 2.6.
Signed-off-by: Keir Fraser <keir@xensource.com>
16 files changed:
.rootkeys
xen/arch/x86/Makefile
xen/arch/x86/cpu/amd.c [new file with mode: 0644]
xen/arch/x86/cpu/centaur.c [new file with mode: 0644]
xen/arch/x86/cpu/common.c [new file with mode: 0644]
xen/arch/x86/cpu/cpu.h [new file with mode: 0644]
xen/arch/x86/cpu/cyrix.c [new file with mode: 0644]
xen/arch/x86/cpu/intel.c [new file with mode: 0644]
xen/arch/x86/cpu/intel_cacheinfo.c [new file with mode: 0644]
xen/arch/x86/cpu/rise.c [new file with mode: 0644]
xen/arch/x86/cpu/transmeta.c [new file with mode: 0644]
xen/arch/x86/setup.c
xen/include/asm-x86/config.h
xen/include/asm-x86/cpufeature.h
xen/include/asm-x86/msr.h
xen/include/asm-x86/processor.h

index 0a86cf84079686b99bf623e1564beecaeeb9bb5e..4b833c13a728602b6f28318ba32971a6c00eea69 100644 (file)
--- a/.rootkeys
+++ b/.rootkeys
 3ddb79bcSC_LvnmFlX-T5iTgaR0SKg xen/arch/x86/boot/x86_32.S
 40e42bdbNu4MjI750THP_8J1S-Sa0g xen/arch/x86/boot/x86_64.S
 4107c15e-VmEcLsE-7JCXZaabI8C7A xen/arch/x86/cdb.c
+4295ecb1Ynez_TseZvDdjD7PzVMDiw xen/arch/x86/cpu/amd.c
+4295ecb1KPPNny26nBEJzK4pAG-KXQ xen/arch/x86/cpu/centaur.c
+4295ecb1QnJx9cbqCJQ1o4TTFQL5Vg xen/arch/x86/cpu/common.c
+4295ecb1ZIJLN5uklV1xompN7DN1WQ xen/arch/x86/cpu/cpu.h
+4295ecb1g6Ye-zy_oXVQQaKw4AtDmw xen/arch/x86/cpu/cyrix.c
+4295ecb1MOdQxXznHu3g-p5DzhMv8g xen/arch/x86/cpu/intel.c
+4295ecb1LsW7ov9JOtRP8euvJKbgbQ xen/arch/x86/cpu/intel_cacheinfo.c
+4295ecb1AeClyruqwLz-xDthMZ5eoA xen/arch/x86/cpu/rise.c
+4295ecb1GO92quFeyoVz2LsPQcFuHg xen/arch/x86/cpu/transmeta.c
 3ddb79bcUrk2EIaM5VsT6wUudH1kkg xen/arch/x86/delay.c
 4294b5ee34eGSh5YNDKMSxBIOycluw xen/arch/x86/dmi_scan.c
 40e34414WiQO4h2m3tcpaCPn7SyYyg xen/arch/x86/dom0_ops.c
index 3978f2ea0686cda308bb6efef981b6a0afaa41f3..d23182897c90d5b152134dfc0edcf029001e41c2 100644 (file)
@@ -6,6 +6,14 @@ OBJS += $(patsubst %.c,%.o,$(wildcard $(TARGET_SUBARCH)/*.c))
 OBJS += $(patsubst %.c,%.o,$(wildcard acpi/*.c))
 OBJS += $(patsubst %.c,%.o,$(wildcard mtrr/*.c))
 OBJS += $(patsubst %.c,%.o,$(wildcard genapic/*.c))
+OBJS += $(patsubst %.c,%.o,$(wildcard cpu/*.c))
+
+ifeq ($(TARGET_SUBARCH),x86_64) 
+OBJS := $(subst cpu/centaur.o,,$(OBJS))
+OBJS := $(subst cpu/cyrix.o,,$(OBJS))
+OBJS := $(subst cpu/rise.o,,$(OBJS))
+OBJS := $(subst cpu/transmeta.o,,$(OBJS))
+endif
 
 OBJS := $(subst $(TARGET_SUBARCH)/asm-offsets.o,,$(OBJS))
 
@@ -38,6 +46,7 @@ clean:
        rm -f mtrr/*.o mtrr/*~ mtrr/core
        rm -f acpi/*.o acpi/*~ acpi/core
        rm -f genapic/*.o genapic/*~ genapic/core
+       rm -f cpu/*.o cpu/*~ cpu/core
 
 delete-unfresh-files:
        # nothing
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
new file mode 100644 (file)
index 0000000..1241e50
--- /dev/null
@@ -0,0 +1,254 @@
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/bitops.h>
+#include <xen/mm.h>
+#include <xen/smp.h>
+#include <asm/io.h>
+#include <asm/msr.h>
+#include <asm/processor.h>
+
+#include "cpu.h"
+
+/* Xen stub: Linux's num_physpages is not available here.  With this 0,
+   the K6 write-allocation sizing in init_amd() always computes 0 MB. */
+#define num_physpages 0
+
+/*
+ *     B step AMD K6 before B 9730xxxx have hardware bugs that can cause
+ *     misexecution of code under Linux. Owners of such processors should
+ *     contact AMD for precise details and a CPU swap.
+ *
+ *     See     http://www.multimania.com/poulot/k6bug.html
+ *             http://www.amd.com/K6/k6docs/revgd.html
+ *
+ *     The following test is erm.. interesting. AMD neglected to up
+ *     the chip setting when fixing the bug but they also tweaked some
+ *     performance at the same time..
+ */
+/* 4-byte-aligned empty function used as the indirect-call timing target
+   for the K6 stepping-B bug test in init_amd() below. */
+extern void vide(void);
+__asm__(".align 4\nvide: ret");
+
+/*
+ * init_amd - AMD-specific CPU setup (ported from Linux 2.6).
+ * Applies per-family errata workarounds and feature fixups:
+ * Elan register-alias removal (family 4), K6 write allocation and
+ * stepping-B detection (family 5), K7 SSE enabling and CLK_CTL
+ * reprogramming (family 6), plus synthetic K7/K8 capability bits
+ * and multi-core detection.  Installed as amd_cpu_dev.c_init.
+ */
+static void __init init_amd(struct cpuinfo_x86 *c)
+{
+       u32 l, h;
+       /* NOTE(review): num_physpages is #defined to 0 in this file, so
+          mbytes is always 0 and the K6 write-allocation sizing below is
+          effectively a no-op under Xen. */
+       int mbytes = num_physpages >> (20-PAGE_SHIFT);
+       int r;
+
+       /*
+        *      FIXME: We should handle the K5 here. Set up the write
+        *      range and also turn on MSR 83 bits 4 and 31 (write alloc,
+        *      no bus pipeline)
+        */
+
+       /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+          3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
+       clear_bit(0*32+31, c->x86_capability);
+       
+       /* NOTE(review): r is assigned but never used afterwards. */
+       r = get_model_name(c);
+
+       switch(c->x86)
+       {
+               case 4:
+               /*
+                * General Systems BIOSen alias the cpu frequency registers
+                * of the Elan at 0x000df000. Unfortunately, one of the Linux
+                * drivers subsequently pokes it, and changes the CPU speed.
+                * Workaround : Remove the unneeded alias.
+                */
+#define CBAR           (0xfffc) /* Configuration Base Address  (32-bit) */
+#define CBAR_ENB       (0x80000000)
+#define CBAR_KEY       (0X000000CB)
+                       if (c->x86_model==9 || c->x86_model == 10) {
+                               if (inl (CBAR) & CBAR_ENB)
+                                       outl (0 | CBAR_KEY, CBAR);
+                       }
+                       break;
+               case 5:
+                       if( c->x86_model < 6 )
+                       {
+                               /* Based on AMD doc 20734R - June 2000 */
+                               if ( c->x86_model == 0 ) {
+                                       clear_bit(X86_FEATURE_APIC, c->x86_capability);
+                                       set_bit(X86_FEATURE_PGE, c->x86_capability);
+                               }
+                               break;
+                       }
+                       
+                       /* K6 stepping B: time 1M indirect calls to vide();
+                          a slow result indicates the buggy stepping (see
+                          the comment block above vide above). */
+                       if ( c->x86_model == 6 && c->x86_mask == 1 ) {
+                               const int K6_BUG_LOOP = 1000000;
+                               int n;
+                               void (*f_vide)(void);
+                               unsigned long d, d2;
+                               
+                               printk(KERN_INFO "AMD K6 stepping B detected - ");
+                               
+                               /*
+                                * It looks like AMD fixed the 2.6.2 bug and improved indirect 
+                                * calls at the same time.
+                                */
+
+                               n = K6_BUG_LOOP;
+                               f_vide = vide;
+                               rdtscl(d);
+                               while (n--) 
+                                       f_vide();
+                               rdtscl(d2);
+                               d = d2-d;
+                               
+                               /* Knock these two lines out if it debugs out ok */
+                               printk(KERN_INFO "AMD K6 stepping B detected - ");
+                               /* -- cut here -- */
+                               if (d > 20*K6_BUG_LOOP) 
+                                       printk("system stability may be impaired when more than 32 MB are used.\n");
+                               else 
+                                       printk("probably OK (after B9730xxxx).\n");
+                               printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
+                       }
+
+                       /* K6 with old style WHCR */
+                       if (c->x86_model < 8 ||
+                          (c->x86_model== 8 && c->x86_mask < 8)) {
+                               /* We can only write allocate on the low 508Mb */
+                               if(mbytes>508)
+                                       mbytes=508;
+
+                               rdmsr(MSR_K6_WHCR, l, h);
+                               if ((l&0x0000FFFF)==0) {
+                                       unsigned long flags;
+                                       l=(1<<0)|((mbytes/4)<<1);
+                                       local_irq_save(flags);
+                                       wbinvd();
+                                       wrmsr(MSR_K6_WHCR, l, h);
+                                       local_irq_restore(flags);
+                                       printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
+                                               mbytes);
+                               }
+                               break;
+                       }
+
+                       if ((c->x86_model == 8 && c->x86_mask >7) ||
+                            c->x86_model == 9 || c->x86_model == 13) {
+                               /* The more serious chips .. */
+
+                               if(mbytes>4092)
+                                       mbytes=4092;
+
+                               rdmsr(MSR_K6_WHCR, l, h);
+                               if ((l&0xFFFF0000)==0) {
+                                       unsigned long flags;
+                                       l=((mbytes>>2)<<22)|(1<<16);
+                                       local_irq_save(flags);
+                                       wbinvd();
+                                       wrmsr(MSR_K6_WHCR, l, h);
+                                       local_irq_restore(flags);
+                                       printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
+                                               mbytes);
+                               }
+
+                               /*  Set MTRR capability flag if appropriate */
+                               if (c->x86_model == 13 || c->x86_model == 9 ||
+                                  (c->x86_model == 8 && c->x86_mask >= 8))
+                                       set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
+                               break;
+                       }
+                       break;
+
+               case 6: /* An Athlon/Duron */
+                       /* Bit 15 of Athlon specific MSR 15, needs to be 0
+                        * to enable SSE on Palomino/Morgan/Barton CPU's.
+                        * If the BIOS didn't enable it already, enable it here.
+                        */
+                       if (c->x86_model >= 6 && c->x86_model <= 10) {
+                               if (!cpu_has(c, X86_FEATURE_XMM)) {
+                                       printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
+                                       rdmsr(MSR_K7_HWCR, l, h);
+                                       l &= ~0x00008000;
+                                       wrmsr(MSR_K7_HWCR, l, h);
+                                       set_bit(X86_FEATURE_XMM, c->x86_capability);
+                               }
+                       }
+
+                       /* It's been determined by AMD that Athlons since model 8 stepping 1
+                        * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
+                        * As per AMD technical note 27212 0.2
+                        */
+                       if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) {
+                               rdmsr(MSR_K7_CLK_CTL, l, h);
+                               if ((l & 0xfff00000) != 0x20000000) {
+                                       printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
+                                               ((l & 0x000fffff)|0x20000000));
+                                       wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
+                               }
+                       }
+                       break;
+       }
+
+       /* Synthetic feature bits that other code keys K7/K8 handling off. */
+       switch (c->x86) {
+       case 15:
+               set_bit(X86_FEATURE_K8, c->x86_capability);
+               break;
+       case 6:
+               set_bit(X86_FEATURE_K7, c->x86_capability); 
+               break;
+       }
+
+       display_cacheinfo(c);
+       detect_ht(c);
+
+#ifdef CONFIG_X86_HT
+       /* AMD dual core looks like HT but isn't really. Hide it from the
+          scheduler. This works around problems with the domain scheduler.
+          Also probably gives slightly better scheduling and disables
+          SMT nice which is harmful on dual core.
+          TBD tune the domain scheduler for dual core. */
+       if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
+               smp_num_siblings = 1;
+#endif
+
+       /* Presumably ECX[7:0] of extended leaf 0x80000008 is (cores - 1);
+          a non-power-of-two count is treated as bogus and reset to 1 —
+          TODO confirm against the AMD CPUID specification. */
+       if (cpuid_eax(0x80000000) >= 0x80000008) {
+               c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
+               if (c->x86_num_cores & (c->x86_num_cores - 1))
+                       c->x86_num_cores = 1;
+       }
+}
+
+/*
+ * amd_size_cache - override the CPUID-reported cache size for early
+ * family-6 parts known to misreport it (AMD errata T13): Duron rev A0
+ * gets 64, Thunderbird rev A1/A2 get 256.  Other CPUs pass through
+ * unchanged.  Installed as amd_cpu_dev.c_size_cache.
+ */
+static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
+{
+       /* AMD errata T13 (order #21922) */
+       if ((c->x86 == 6)) {
+               if (c->x86_model == 3 && c->x86_mask == 0)      /* Duron Rev A0 */
+                       size = 64;
+               if (c->x86_model == 4 &&
+                   (c->x86_mask==0 || c->x86_mask==1)) /* Tbird rev A1/A2 */
+                       size = 256;
+       }
+       return size;
+}
+
+/*
+ * AMD vendor driver descriptor: an explicit model-name table for the
+ * family-4 (486/Am5x86) parts, plus the init/identify/cache-size hooks
+ * used by the common CPU identification code.
+ */
+static struct cpu_dev amd_cpu_dev __initdata = {
+       .c_vendor       = "AMD",
+       .c_ident        = { "AuthenticAMD" },
+       .c_models = {
+               { .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
+                 {
+                         [3] = "486 DX/2",
+                         [7] = "486 DX/2-WB",
+                         [8] = "486 DX/4", 
+                         [9] = "486 DX/4-WB", 
+                         [14] = "Am5x86-WT",
+                         [15] = "Am5x86-WB" 
+                 }
+               },
+       },
+       .c_init         = init_amd,
+       .c_identify     = generic_identify,
+       .c_size_cache   = amd_size_cache,
+};
+
+/* Register the AMD driver in the common cpu_devs[] vendor table. */
+int __init amd_init_cpu(void)
+{
+       cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
+       return 0;
+}
+
+//early_arch_initcall(amd_init_cpu);
diff --git a/xen/arch/x86/cpu/centaur.c b/xen/arch/x86/cpu/centaur.c
new file mode 100644 (file)
index 0000000..09e5498
--- /dev/null
@@ -0,0 +1,477 @@
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/init.h>
+#include <xen/bitops.h>
+#include <asm/processor.h>
+#include <asm/msr.h>
+#include <asm/e820.h>
+#include "cpu.h"
+
+#ifdef CONFIG_X86_OOSTORE
+
+/* Return the largest power of two that is <= x (0 if x == 0).
+   NOTE(review): if x >= 2^31, s wraps to 0 and the loop never exits;
+   callers only pass values derived from ramtop(), which the comment
+   above ramtop() assumes is well below 4GiB. */
+static u32 __init power2(u32 x)
+{
+       u32 s=1;
+       while(s<=x)
+               s<<=1;
+       return s>>=1;
+}
+
+
+/*
+ *     Set up an actual MCR: program base (page-aligned, high word),
+ *     a mask derived from size, and the attribute key into
+ *     MSR_IDT_MCR0+reg, then report it to the MTRR driver.
+ *     size must be a power of 2 (used to build the mask below).
+ */
+static void __init centaur_mcr_insert(int reg, u32 base, u32 size, int key)
+{
+       u32 lo, hi;
+       
+       hi = base & ~0xFFF;
+       lo = ~(size-1);         /* Size is a power of 2 so this makes a mask */
+       lo &= ~0xFFF;           /* Remove the ctrl value bits */
+       lo |= key;              /* Attribute we wish to set */
+       wrmsr(reg+MSR_IDT_MCR0, lo, hi);
+       mtrr_centaur_report_mcr(reg, lo, hi);   /* Tell the mtrr driver */
+}
+
+/*
+ *     Figure what we can cover with MCR's: scan the e820 map for the
+ *     highest end-of-RAM address below 4GiB, clipped at the first
+ *     reserved region at or above 1MiB (see the trailing comment).
+ *
+ *     Shortcut: We know you can't put 4Gig of RAM on a winchip
+ */
+
+static u32 __init ramtop(void)         /* 16388 */
+{
+       int i;
+       u32 top = 0;
+       u32 clip = 0xFFFFFFFFUL;
+       
+       for (i = 0; i < e820.nr_map; i++) {
+               unsigned long start, end;
+
+               if (e820.map[i].addr > 0xFFFFFFFFUL)
+                       continue;
+               /*
+                *      Don't MCR over reserved space. Ignore the ISA hole
+                *      we frob around that catastrophe already
+                */
+                                       
+               if (e820.map[i].type == E820_RESERVED)
+               {
+                       if(e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip)
+                               clip = e820.map[i].addr;
+                       continue;
+               }
+               start = e820.map[i].addr;
+               end = e820.map[i].addr + e820.map[i].size;
+               if (start >= end)
+                       continue;
+               if (end > top)
+                       top = end;
+       }
+       /* Everything below 'top' should be RAM except for the ISA hole.
+          Because of the limited MCR's we want to map NV/ACPI into our
+          MCR range for gunk in RAM 
+          
+          Clip might cause us to MCR insufficient RAM but that is an
+          acceptable failure mode and should only bite obscure boxes with
+          a VESA hole at 15Mb
+          
+          The second case Clip sometimes kicks in is when the EBDA is marked
+          as reserved. Again we fail safe with reasonable results
+       */
+       
+       if(top>clip)
+               top=clip;
+               
+       return top;
+}
+
+/*
+ *     Compute a set of MCR's to give maximum coverage: starting from
+ *     the largest power-of-two boundary inside RAM, greedily install
+ *     up to nr MCRs with attribute 'key', growing upwards towards the
+ *     top of RAM, downwards towards 1MiB, or filling below the ISA
+ *     hole — whichever block is largest at each step.
+ *     Returns the number of MCRs installed; the caller must set the
+ *     enable mask itself (see the comment at the bottom).
+ */
+
+static int __init centaur_mcr_compute(int nr, int key)
+{
+       u32 mem = ramtop();
+       u32 root = power2(mem);
+       u32 base = root;
+       u32 top = root;
+       u32 floor = 0;
+       int ct = 0;
+       
+       while(ct<nr)
+       {
+               u32 fspace = 0;
+
+               /*
+                *      Find the largest block we will fill going upwards
+                */
+
+               u32 high = power2(mem-top);     
+
+               /*
+                *      Find the largest block we will fill going downwards
+                */
+
+               u32 low = base/2;
+
+               /*
+                *      Don't fill below 1Mb going downwards as there
+                *      is an ISA hole in the way.
+                */             
+                
+               if(base <= 1024*1024)
+                       low = 0;
+                       
+               /*
+                *      See how much space we could cover by filling below
+                *      the ISA hole
+                */
+                
+               if(floor == 0)
+                       fspace = 512*1024;
+               else if(floor ==512*1024)
+                       fspace = 128*1024;
+
+               /* And forget ROM space */
+               
+               /*
+                *      Now install the largest coverage we get
+                */
+                
+               if(fspace > high && fspace > low)
+               {
+                       centaur_mcr_insert(ct, floor, fspace, key);
+                       floor += fspace;
+               }
+               else if(high > low)
+               {
+                       centaur_mcr_insert(ct, top, high, key);
+                       top += high;
+               }
+               else if(low > 0)
+               {
+                       base -= low;
+                       centaur_mcr_insert(ct, base, low, key);
+               }
+               else break;
+               ct++;
+       }
+       /*
+        *      We loaded ct values. We now need to set the mask. The caller
+        *      must do this bit.
+        */
+        
+       return ct;
+}
+
+/* Cover RAM with up to 6 type-31 (full write-combining) MCRs and wipe
+   the remaining registers up to index 7. */
+static void __init centaur_create_optimal_mcr(void)
+{
+       int i;
+       /*
+        *      Allocate up to 6 mcrs to mark as much of ram as possible
+        *      as write combining and weak write ordered.
+        *
+        *      To experiment with: Linux never uses stack operations for 
+        *      mmio spaces so we could globally enable stack operation wc
+        *
+        *      Load the registers with type 31 - full write combining, all
+        *      writes weakly ordered.
+        */
+       int used = centaur_mcr_compute(6, 31);
+
+       /*
+        *      Wipe unused MCRs
+        */
+        
+       for(i=used;i<8;i++)
+               wrmsr(MSR_IDT_MCR0+i, 0, 0);
+}
+
+/* WinChip 2 variant of centaur_create_optimal_mcr(): uses type 25 and
+   additionally flags the in-use registers in MCR_CTRL (bits 9+i).
+   Caller must have unlocked the MCRs (winchip2_unprotect_mcr). */
+static void __init winchip2_create_optimal_mcr(void)
+{
+       u32 lo, hi;
+       int i;
+
+       /*
+        *      Allocate up to 6 mcrs to mark as much of ram as possible
+        *      as write combining, weak store ordered.
+        *
+        *      Load the registers with type 25
+        *              8       -       weak write ordering
+        *              16      -       weak read ordering
+        *              1       -       write combining
+        */
+
+       int used = centaur_mcr_compute(6, 25);
+       
+       /*
+        *      Mark the registers we are using.
+        */
+        
+       rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
+       for(i=0;i<used;i++)
+               lo|=1<<(9+i);
+       wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
+       
+       /*
+        *      Wipe unused MCRs
+        */
+        
+       for(i=used;i<8;i++)
+               wrmsr(MSR_IDT_MCR0+i, 0, 0);
+}
+
+/*
+ *     Handle the MCR key on the Winchip 2: copy the key stored in
+ *     MCR_CTRL bits 19-17 into bits 8-6, which presumably unlocks
+ *     MCR updates (inferred from the protect/unprotect pairing —
+ *     confirm against the IDT WinChip 2 datasheet).
+ */
+
+static void __init winchip2_unprotect_mcr(void)
+{
+       u32 lo, hi;
+       u32 key;
+       
+       rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
+       lo&=~0x1C0;     /* blank bits 8-6 */
+       key = (lo>>17) & 7;
+       lo |= key<<6;   /* replace with unlock key */
+       wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
+}
+
+/* Re-lock the MCRs by clearing the key field (bits 8-6) of MCR_CTRL. */
+static void __init winchip2_protect_mcr(void)
+{
+       u32 lo, hi;
+       
+       rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
+       lo&=~0x1C0;     /* blank bits 8-6 */
+       wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
+}
+#endif /* CONFIG_X86_OOSTORE */
+
+#define ACE_PRESENT    (1 << 6)
+#define ACE_ENABLED    (1 << 7)
+#define ACE_FCR                (1 << 28)       /* MSR_VIA_FCR */
+
+#define RNG_PRESENT    (1 << 2)
+#define RNG_ENABLED    (1 << 3)
+#define RNG_ENABLE     (1 << 6)        /* MSR_VIA_RNG */
+
+/*
+ * init_c3 - setup for VIA C3 (Centaur family 6): enable the ACE
+ * crypto and RNG units if present but disabled, record the Centaur
+ * extended feature flags, and apply CX8/PGE/3DNow! fixups for
+ * pre-Nehemiah models.  Called from init_centaur().
+ */
+static void __init init_c3(struct cpuinfo_x86 *c)
+{
+       u32  lo, hi;
+
+       /* Test for Centaur Extended Feature Flags presence */
+       if (cpuid_eax(0xC0000000) >= 0xC0000001) {
+               u32 tmp = cpuid_edx(0xC0000001);
+
+               /* enable ACE unit, if present and disabled */
+               if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
+                       rdmsr (MSR_VIA_FCR, lo, hi);
+                       lo |= ACE_FCR;          /* enable ACE unit */
+                       wrmsr (MSR_VIA_FCR, lo, hi);
+                       printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
+               }
+
+               /* enable RNG unit, if present and disabled */
+               if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
+                       rdmsr (MSR_VIA_RNG, lo, hi);
+                       lo |= RNG_ENABLE;       /* enable RNG unit */
+                       wrmsr (MSR_VIA_RNG, lo, hi);
+                       printk(KERN_INFO "CPU: Enabled h/w RNG\n");
+               }
+
+               /* store Centaur Extended Feature Flags as
+                * word 5 of the CPU capability bit array
+                * (re-read so any units enabled above are reflected)
+                */
+               c->x86_capability[5] = cpuid_edx(0xC0000001);
+       }
+
+       /* Cyrix III family needs CX8 & PGE explicitly enabled. */
+       if (c->x86_model >=6 && c->x86_model <= 9) {
+               rdmsr (MSR_VIA_FCR, lo, hi);
+               lo |= (1<<1 | 1<<7);
+               wrmsr (MSR_VIA_FCR, lo, hi);
+               set_bit(X86_FEATURE_CX8, c->x86_capability);
+       }
+
+       /* Before Nehemiah, the C3's had 3dNOW! */
+       if (c->x86_model >=6 && c->x86_model <9)
+               set_bit(X86_FEATURE_3DNOW, c->x86_capability);
+
+       get_model_name(c);
+       display_cacheinfo(c);
+}
+
+/*
+ * init_centaur - vendor setup for Centaur/IDT/VIA CPUs.
+ * Family 5 (WinChip C6/2/2A/2B/3/4): set/clear FCR feature bits per
+ * model, optionally program MCRs for out-of-order stores, report
+ * MCR/CX8/3DNow! capabilities and build the "WinChip <name>" model id.
+ * Family 6 (VIA C3) is delegated to init_c3().
+ */
+static void __init init_centaur(struct cpuinfo_x86 *c)
+{
+       /* FCR bit names: E* = enable, D* = disable. */
+       enum {
+               ECX8=1<<1,
+               EIERRINT=1<<2,
+               DPM=1<<3,
+               DMCE=1<<4,
+               DSTPCLK=1<<5,
+               ELINEAR=1<<6,
+               DSMC=1<<7,
+               DTLOCK=1<<8,
+               /* NOTE(review): EDCTLB shares bit 8 with DTLOCK; the bit's
+                  meaning appears model-dependent — confirm vs IDT docs. */
+               EDCTLB=1<<8,
+               EMMX=1<<9,
+               DPDC=1<<11,
+               EBRPRED=1<<12,
+               DIC=1<<13,
+               DDC=1<<14,
+               DNA=1<<15,
+               ERETSTK=1<<16,
+               E2MMX=1<<19,
+               EAMD3D=1<<20,
+       };
+
+       char *name;
+       u32  fcr_set=0;
+       u32  fcr_clr=0;
+       u32  lo,hi,newlo;
+       u32  aa,bb,cc,dd;
+
+       /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+          3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
+       clear_bit(0*32+31, c->x86_capability);
+
+       switch (c->x86) {
+
+               case 5:
+                       switch(c->x86_model) {
+                       case 4:
+                               name="C6";
+                               fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
+                               fcr_clr=DPDC;
+                               printk(KERN_NOTICE "Disabling bugged TSC.\n");
+                               clear_bit(X86_FEATURE_TSC, c->x86_capability);
+#ifdef CONFIG_X86_OOSTORE
+                               centaur_create_optimal_mcr();
+                               /* Enable
+                                       write combining on non-stack, non-string
+                                       write combining on string, all types
+                                       weak write ordering 
+                                       
+                                  The C6 original lacks weak read order 
+                                  
+                                  Note 0x120 is write only on Winchip 1 */
+                                  
+                               wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
+#endif                         
+                               break;
+                       case 8:
+                               switch(c->x86_mask) {
+                               default:
+                                       name="2";
+                                       break;
+                               case 7 ... 9:
+                                       name="2A";
+                                       break;
+                               case 10 ... 15:
+                                       name="2B";
+                                       break;
+                               }
+                               fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
+                               fcr_clr=DPDC;
+#ifdef CONFIG_X86_OOSTORE
+                               winchip2_unprotect_mcr();
+                               winchip2_create_optimal_mcr();
+                               rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
+                               /* Enable
+                                       write combining on non-stack, non-string
+                                       write combining on string, all types
+                                       weak write ordering 
+                               */
+                               lo|=31;                         
+                               wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
+                               winchip2_protect_mcr();
+#endif
+                               break;
+                       case 9:
+                               name="3";
+                               fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
+                               fcr_clr=DPDC;
+#ifdef CONFIG_X86_OOSTORE
+                               winchip2_unprotect_mcr();
+                               winchip2_create_optimal_mcr();
+                               rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
+                               /* Enable
+                                       write combining on non-stack, non-string
+                                       write combining on string, all types
+                                       weak write ordering 
+                               */
+                               lo|=31;                         
+                               wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
+                               winchip2_protect_mcr();
+#endif
+                               break;
+                       case 10:
+                               name="4";
+                               /* no info on the WC4 yet */
+                               break;
+                       default:
+                               name="??";
+                       }
+
+                       /* Apply the per-model FCR changes computed above. */
+                       rdmsr(MSR_IDT_FCR1, lo, hi);
+                       newlo=(lo|fcr_set) & (~fcr_clr);
+
+                       if (newlo!=lo) {
+                               printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo );
+                               wrmsr(MSR_IDT_FCR1, newlo, hi );
+                       } else {
+                               printk(KERN_INFO "Centaur FCR is 0x%X\n",lo);
+                       }
+                       /* Emulate MTRRs using Centaur's MCR. */
+                       set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability);
+                       /* Report CX8 */
+                       set_bit(X86_FEATURE_CX8, c->x86_capability);
+                       /* Set 3DNow! on Winchip 2 and above. */
+                       if (c->x86_model >=8)
+                               set_bit(X86_FEATURE_3DNOW, c->x86_capability);
+                       /* See if we can find out some more. */
+                       if ( cpuid_eax(0x80000000) >= 0x80000005 ) {
+                               /* Yes, we can. */
+                               cpuid(0x80000005,&aa,&bb,&cc,&dd);
+                               /* Add L1 data and code cache sizes. */
+                               c->x86_cache_size = (cc>>24)+(dd>>24);
+                       }
+                       sprintf( c->x86_model_id, "WinChip %s", name );
+                       break;
+
+               case 6:
+                       init_c3(c);
+                       break;
+       }
+}
+
+/*
+ * centaur_size_cache - correct the CPUID-reported cache size for VIA
+ * quirks (see inline comments).  Installed as c_size_cache.
+ */
+static unsigned int centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size)
+{
+       /* VIA C3 CPUs (670-68F) need further shifting. */
+       if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
+               size >>= 8;
+
+       /* VIA also screwed up Nehemiah stepping 1, and made
+          it return '65KB' instead of '64KB'
+          - Note, it seems this may only be in engineering samples. */
+       if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65))
+               size -=1;
+
+       return size;
+}
+
+/* Centaur vendor driver descriptor for the common identification code. */
+static struct cpu_dev centaur_cpu_dev __initdata = {
+       .c_vendor       = "Centaur",
+       .c_ident        = { "CentaurHauls" },
+       .c_init         = init_centaur,
+       .c_size_cache   = centaur_size_cache,
+};
+
+/* Register the Centaur driver in the common cpu_devs[] vendor table. */
+int __init centaur_init_cpu(void)
+{
+       cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
+       return 0;
+}
+
+//early_arch_initcall(centaur_init_cpu);
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
new file mode 100644 (file)
index 0000000..145ef61
--- /dev/null
@@ -0,0 +1,579 @@
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/string.h>
+#include <xen/delay.h>
+#include <xen/smp.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/msr.h>
+#include <asm/io.h>
+#include <asm/mpspec.h>
+#include <asm/apic.h>
+#include <mach_apic.h>
+
+#include "cpu.h"
+
+#define tsc_disable 0
+#define disable_pse 0
+
+static int cachesize_override __initdata = -1;
+static int disable_x86_fxsr __initdata = 0;
+static int disable_x86_serial_nr __initdata = 1;
+
+struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+
+extern void mcheck_init(struct cpuinfo_x86 *c);
+
+/* Fallback c_init used when no vendor-specific cpu_dev matched: the only
+ * thing we can do for a CPUID-less CPU is name it by family. */
+static void default_init(struct cpuinfo_x86 * c)
+{
+       /* Not much we can do here... */
+       /* Check if at least it has cpuid */
+       if (c->cpuid_level == -1) {
+               /* No cpuid. It must be an ancient CPU */
+               if (c->x86 == 4)
+                       strcpy(c->x86_model_id, "486");
+               else if (c->x86 == 3)
+                       strcpy(c->x86_model_id, "386");
+       }
+}
+
+static struct cpu_dev default_cpu = {
+       .c_init = default_init,
+};
+/* Points at the matched vendor's cpu_dev after get_cpu_vendor(c, 0). */
+static struct cpu_dev * this_cpu = &default_cpu;
+
+/* Fill c->x86_model_id with the 48-byte brand string from extended CPUID
+ * leaves 0x80000002..0x80000004. Returns 1 on success, 0 if the CPU does
+ * not implement those leaves. */
+int __init get_model_name(struct cpuinfo_x86 *c)
+{
+       unsigned int *v;
+       char *p, *q;
+
+       if (cpuid_eax(0x80000000) < 0x80000004)
+               return 0;
+
+       /* Each leaf returns 16 bytes of the string in eax..edx. */
+       v = (unsigned int *) c->x86_model_id;
+       cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
+       cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
+       cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
+       c->x86_model_id[48] = 0;
+
+       /* Intel chips right-justify this string for some dumb reason;
+          undo that brain damage */
+       p = q = &c->x86_model_id[0];
+       while ( *p == ' ' )
+            p++;
+       if ( p != q ) {
+            while ( *p )
+                 *q++ = *p++;
+            while ( q <= &c->x86_model_id[48] )
+                 *q++ = '\0';  /* Zero-pad the rest */
+       }
+
+       return 1;
+}
+
+
+/* Print L1/L2 cache geometry from the AMD-style extended CPUID leaves
+ * 0x80000005/0x80000006 and record the size in c->x86_cache_size (KB). */
+void __init display_cacheinfo(struct cpuinfo_x86 *c)
+{
+       unsigned int n, dummy, ecx, edx, l2size;
+
+       n = cpuid_eax(0x80000000);
+
+       if (n >= 0x80000005) {
+               /* Leaf 0x80000005: ecx = L1 D-cache, edx = L1 I-cache;
+                * size in KB lives in bits 31:24, line size in bits 7:0. */
+               cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
+               printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
+                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+               c->x86_cache_size=(ecx>>24)+(edx>>24);  
+       }
+
+       if (n < 0x80000006)     /* Some chips just has a large L1. */
+               return;
+
+       ecx = cpuid_ecx(0x80000006);
+       l2size = ecx >> 16;
+       
+       /* do processor-specific cache resizing */
+       if (this_cpu->c_size_cache)
+               l2size = this_cpu->c_size_cache(c,l2size);
+
+       /* Allow user to override all this if necessary. */
+       if (cachesize_override != -1)
+               l2size = cachesize_override;
+
+       if ( l2size == 0 )
+               return;         /* Again, no L2 cache is possible */
+
+       c->x86_cache_size = l2size;
+
+       printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
+              l2size, ecx & 0xFF);
+}
+
+/* Naming convention should be: <Name> [(<Codename>)] */
+/* This table only is used unless init_<vendor>() below doesn't set it; */
+/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
+
+/* Look up CPU names by table lookup. */
+/* Look up CPU names by table lookup.
+ * Scan this_cpu->c_models for an entry matching c->x86 and return its
+ * model-name string, or NULL if the model is out of range or unknown. */
+static char __init *table_lookup_model(struct cpuinfo_x86 *c)
+{
+       struct cpu_model_info *info;
+
+       if ( c->x86_model >= 16 )
+               return NULL;    /* Range check */
+
+       if (!this_cpu)
+               return NULL;
+
+       info = this_cpu->c_models;
+
+       /* c_models is a fixed-size array; a zero family terminates the scan. */
+       while (info && info->family) {
+               if (info->family == c->x86)
+                       return info->model_names[c->x86_model];
+               info++;
+       }
+       return NULL;            /* Not found */
+}
+
+
+/* Map the 12-byte CPUID vendor string in c->x86_vendor_id to an
+ * X86_VENDOR_* index by comparing against each registered cpu_dev's
+ * c_ident strings. When 'early' is clear, also switch this_cpu to the
+ * matched vendor's descriptor. */
+void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+{
+       char *v = c->x86_vendor_id;
+       int i;
+
+       for (i = 0; i < X86_VENDOR_NUM; i++) {
+               if (cpu_devs[i]) {
+                       if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
+                           (cpu_devs[i]->c_ident[1] && 
+                            !strcmp(v,cpu_devs[i]->c_ident[1]))) {
+                               c->x86_vendor = i;
+                               if (!early)
+                                       this_cpu = cpu_devs[i];
+                               break;
+                       }
+               }
+       }
+}
+
+
+/* "nofxsr" command-line option: disable FXSR/SSE feature bits later in
+ * identify_cpu(). */
+static int __init x86_fxsr_setup(char * s)
+{
+       disable_x86_fxsr = 1;
+       return 1;
+}
+__setup("nofxsr", x86_fxsr_setup);
+
+
+/* Standard macro to see if a specific flag is changeable */
+/* Return nonzero if the given EFLAGS bit can be toggled: push flags,
+ * flip the bit, pop/re-push, and compare. Used to probe for CPUID
+ * (ID flag) and to tell a 386 from a 486 (AC flag). */
+static inline int flag_is_changeable_p(unsigned long flag)
+{
+       unsigned long f1, f2;
+
+       asm("pushf\n\t"
+           "pushf\n\t"
+           "pop %0\n\t"
+           "mov %0,%1\n\t"
+           "xor %2,%0\n\t"
+           "push %0\n\t"
+           "popf\n\t"
+           "pushf\n\t"
+           "pop %0\n\t"
+           "popf\n\t"
+           : "=&r" (f1), "=&r" (f2)
+           : "ir" (flag));
+
+       return ((f1^f2) & flag) != 0;
+}
+
+
+/* Probe for the CPUID instruction: present iff EFLAGS.ID is toggleable. */
+int __init have_cpuid_p(void)
+{
+       return flag_is_changeable_p(X86_EFLAGS_ID);
+}
+
+/* Do minimum CPU detection early.
+   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
+   The others are not touched to avoid unwanted side effects. */
+/* Minimal early identification of the boot CPU (boot_cpu_data only):
+ * vendor, cpuid_level, family/model/mask, cache alignment. Deliberately
+ * touches nothing else; full detection happens later in identify_cpu(). */
+void __init early_cpu_detect(void)
+{
+       struct cpuinfo_x86 *c = &boot_cpu_data;
+
+       c->x86_cache_alignment = 32;
+
+       if (!have_cpuid_p())
+               return;
+
+       /* Get vendor name */
+       cpuid(0x00000000, &c->cpuid_level,
+             (int *)&c->x86_vendor_id[0],
+             (int *)&c->x86_vendor_id[8],
+             (int *)&c->x86_vendor_id[4]);
+
+       get_cpu_vendor(c, 1);
+
+       c->x86 = 4;
+       if (c->cpuid_level >= 0x00000001) {
+               u32 junk, tfms, cap0, misc;
+               cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
+               /* Decode family/model/stepping; fold in the extended
+                * family/model fields when base family == 0xf. */
+               c->x86 = (tfms >> 8) & 15;
+               c->x86_model = (tfms >> 4) & 15;
+               if (c->x86 == 0xf) {
+                       c->x86 += (tfms >> 20) & 0xff;
+                       c->x86_model += ((tfms >> 16) & 0xF) << 4;
+               }
+               c->x86_mask = tfms & 15;
+               /* CLFLUSH present (bit 19): line size = ebx[15:8] * 8. */
+               if (cap0 & (1<<19))
+                       c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
+       }
+
+       early_intel_workaround(c);
+}
+
+/* Generic CPUID-based identification: vendor string, standard and
+ * AMD-extended capability words, family/model/stepping, brand string.
+ * Vendor c_identify hooks call this then apply their own quirks. */
+void __init generic_identify(struct cpuinfo_x86 * c)
+{
+       u32 tfms, xlvl;
+       int junk;
+
+       if (have_cpuid_p()) {
+               /* Get vendor name */
+               cpuid(0x00000000, &c->cpuid_level,
+                     (int *)&c->x86_vendor_id[0],
+                     (int *)&c->x86_vendor_id[8],
+                     (int *)&c->x86_vendor_id[4]);
+               
+               get_cpu_vendor(c, 0);
+               /* Initialize the standard set of capabilities */
+               /* Note that the vendor-specific code below might override */
+       
+               /* Intel-defined flags: level 0x00000001 */
+               if ( c->cpuid_level >= 0x00000001 ) {
+                       u32 capability, excap;
+                       cpuid(0x00000001, &tfms, &junk, &excap, &capability);
+                       c->x86_capability[0] = capability;
+                       c->x86_capability[4] = excap;
+                       c->x86 = (tfms >> 8) & 15;
+                       c->x86_model = (tfms >> 4) & 15;
+                       /* Family 0xf: fold in extended family/model fields. */
+                       if (c->x86 == 0xf) {
+                               c->x86 += (tfms >> 20) & 0xff;
+                               c->x86_model += ((tfms >> 16) & 0xF) << 4;
+                       } 
+                       c->x86_mask = tfms & 15;
+               } else {
+                       /* Have CPUID level 0 only - unheard of */
+                       c->x86 = 4;
+               }
+
+               /* AMD-defined flags: level 0x80000001 */
+               xlvl = cpuid_eax(0x80000000);
+               if ( (xlvl & 0xffff0000) == 0x80000000 ) {
+                       if ( xlvl >= 0x80000001 ) {
+                               c->x86_capability[1] = cpuid_edx(0x80000001);
+                               c->x86_capability[6] = cpuid_ecx(0x80000001);
+                       }
+                       if ( xlvl >= 0x80000004 )
+                               get_model_name(c); /* Default name */
+               }
+       }
+}
+
+/* Disable the processor serial number (PSN) feature via
+ * MSR_IA32_BBL_CR_CTL unless the user asked to keep it, and clear the
+ * PN capability bit accordingly. */
+static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+{
+       if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
+               /* Disable processor serial number */
+               unsigned long lo,hi;
+               rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+               lo |= 0x200000;
+               wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+               printk(KERN_NOTICE "CPU serial number disabled.\n");
+               clear_bit(X86_FEATURE_PN, c->x86_capability);
+
+               /* Disabling the serial number may affect the cpuid level */
+               c->cpuid_level = cpuid_eax(0);
+       }
+}
+
+/* "serialnumber" command-line option: leave the serial number enabled. */
+static int __init x86_serial_nr_setup(char *s)
+{
+       disable_x86_serial_nr = 0;
+       return 1;
+}
+__setup("serialnumber", x86_serial_nr_setup);
+
+
+
+/*
+ * This does the hard work of actually picking apart the CPU stuff...
+ */
+/* Full per-CPU identification: reset *c to a clean state, run generic
+ * and vendor-specific probes, apply command-line overrides, resolve the
+ * model name, and (on APs) intersect capabilities into boot_cpu_data. */
+void __init identify_cpu(struct cpuinfo_x86 *c)
+{
+       int i;
+
+       c->x86_cache_size = -1;
+       c->x86_vendor = X86_VENDOR_UNKNOWN;
+       c->cpuid_level = -1;    /* CPUID not detected */
+       c->x86_model = c->x86_mask = 0; /* So far unknown... */
+       c->x86_vendor_id[0] = '\0'; /* Unset */
+       c->x86_model_id[0] = '\0';  /* Unset */
+       c->x86_num_cores = 1;
+       memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+       if (!have_cpuid_p()) {
+               /* First of all, decide if this is a 486 or higher */
+               /* It's a 486 if we can modify the AC flag */
+               if ( flag_is_changeable_p(X86_EFLAGS_AC) )
+                       c->x86 = 4;
+               else
+                       c->x86 = 3;
+       }
+
+       generic_identify(c);
+
+#ifdef NOISY_CAPS
+       printk(KERN_DEBUG "CPU: After generic identify, caps:");
+       for (i = 0; i < NCAPINTS; i++)
+               printk(" %08lx", c->x86_capability[i]);
+       printk("\n");
+#endif
+
+       /* Optional vendor identify hook (e.g. Cyrix CPUID enabling). */
+       if (this_cpu->c_identify) {
+               this_cpu->c_identify(c);
+#ifdef NOISY_CAPS
+               printk(KERN_DEBUG "CPU: After vendor identify, caps:");
+               for (i = 0; i < NCAPINTS; i++)
+                       printk(" %08lx", c->x86_capability[i]);
+               printk("\n");
+#endif
+       }
+
+       /*
+        * Vendor-specific initialization.  In this section we
+        * canonicalize the feature flags, meaning if there are
+        * features a certain CPU supports which CPUID doesn't
+        * tell us, CPUID claiming incorrect flags, or other bugs,
+        * we handle them here.
+        *
+        * At the end of this section, c->x86_capability better
+        * indicate the features this CPU genuinely supports!
+        */
+       if (this_cpu->c_init)
+               this_cpu->c_init(c);
+
+       /* Disable the PN if appropriate */
+       squash_the_stupid_serial_number(c);
+
+       /*
+        * The vendor-specific functions might have changed features.  Now
+        * we do "generic changes."
+        */
+
+       /* TSC disabled? (tsc_disable is a constant 0 in Xen.) */
+       if ( tsc_disable )
+               clear_bit(X86_FEATURE_TSC, c->x86_capability);
+
+       /* FXSR disabled? */
+       if (disable_x86_fxsr) {
+               clear_bit(X86_FEATURE_FXSR, c->x86_capability);
+               clear_bit(X86_FEATURE_XMM, c->x86_capability);
+       }
+
+       if (disable_pse)
+               clear_bit(X86_FEATURE_PSE, c->x86_capability);
+
+       /* If the model name is still unset, do table lookup. */
+       if ( !c->x86_model_id[0] ) {
+               char *p;
+               p = table_lookup_model(c);
+               if ( p )
+                       strcpy(c->x86_model_id, p);
+               else
+                       /* Last resort... */
+                       sprintf(c->x86_model_id, "%02x/%02x",
+                               c->x86_vendor, c->x86_model);
+       }
+
+       /* Now the feature flags better reflect actual CPU features! */
+#ifdef NOISY_CAPS
+       printk(KERN_DEBUG "CPU: After all inits, caps:");
+       for (i = 0; i < NCAPINTS; i++)
+               printk(" %08lx", c->x86_capability[i]);
+       printk("\n");
+#endif
+       /*
+        * On SMP, boot_cpu_data holds the common feature set between
+        * all CPUs; so make sure that we indicate which features are
+        * common between the CPUs.  The first time this routine gets
+        * executed, c == &boot_cpu_data.
+        */
+       if ( c != &boot_cpu_data ) {
+               /* AND the already accumulated flags with these */
+               for ( i = 0 ; i < NCAPINTS ; i++ )
+                       boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
+       }
+
+       /* Init Machine Check Exception if available. */
+#ifdef CONFIG_X86_MCE
+       mcheck_init(c);
+#endif
+}
+/*
+ *     Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
+ */
+/* Cyrix and NSC Geode parts need their c_init quirks run before the TSC
+ * is calibrated; both vendors share the Cyrix init routine. */
+void __init dodgy_tsc(void)
+{
+       if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
+           ( boot_cpu_data.x86_vendor == X86_VENDOR_NSC   ))
+               cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
+}
+
+#ifdef CONFIG_X86_HT
+/* Hyper-Threading detection: read the logical-sibling count from CPUID
+ * leaf 1 (ebx[23:16]) and derive this CPU's physical package ID from the
+ * initial APIC ID (ebx[31:24]) by masking off log2(siblings) bits. */
+void __init detect_ht(struct cpuinfo_x86 *c)
+{
+       u32     eax, ebx, ecx, edx;
+       int     index_lsb, index_msb, tmp;
+       int     cpu = smp_processor_id();
+
+       if (!cpu_has(c, X86_FEATURE_HT))
+               return;
+
+       cpuid(1, &eax, &ebx, &ecx, &edx);
+       smp_num_siblings = (ebx & 0xff0000) >> 16;
+
+       if (smp_num_siblings == 1) {
+               printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
+       } else if (smp_num_siblings > 1 ) {
+               index_lsb = 0;
+               index_msb = 31;
+
+               if (smp_num_siblings > NR_CPUS) {
+                       printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
+                       smp_num_siblings = 1;
+                       return;
+               }
+               /* index_lsb/index_msb bracket the sibling count; if it is
+                * not a power of two, round up to the next power of two. */
+               tmp = smp_num_siblings;
+               while ((tmp & 1) == 0) {
+                       tmp >>=1 ;
+                       index_lsb++;
+               }
+               tmp = smp_num_siblings;
+               while ((tmp & 0x80000000 ) == 0) {
+                       tmp <<=1 ;
+                       index_msb--;
+               }
+               if (index_lsb != index_msb )
+                       index_msb++;
+               phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
+
+               printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
+                      phys_proc_id[cpu]);
+       }
+}
+#endif
+
+/* Print a one-line summary of the CPU: vendor (unless already part of the
+ * model string), model name or "<family>86" fallback, and stepping. */
+void __init print_cpu_info(struct cpuinfo_x86 *c)
+{
+       char *vendor = NULL;
+
+       if (c->x86_vendor < X86_VENDOR_NUM)
+               vendor = this_cpu->c_vendor;
+       else if (c->cpuid_level >= 0)
+               vendor = c->x86_vendor_id;
+
+       /* Skip the vendor prefix if the model string already starts with it. */
+       if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
+               printk("%s ", vendor);
+
+       if (!c->x86_model_id[0])
+               printk("%d86", c->x86);
+       else
+               printk("%s", c->x86_model_id);
+
+       if (c->x86_mask || c->cpuid_level >= 0) 
+               printk(" stepping %02x\n", c->x86_mask);
+       else
+               printk("\n");
+}
+
+cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+
+/* This is hacky. :)
+ * We're emulating future behavior.
+ * In the future, the cpu-specific init functions will be called implicitly
+ * via the magic of initcalls.
+ * They will insert themselves into the cpu_devs structure.
+ * Then, when cpu_init() is called, we can just iterate over that array.
+ */
+
+extern int intel_cpu_init(void);
+extern int cyrix_init_cpu(void);
+extern int nsc_init_cpu(void);
+extern int amd_init_cpu(void);
+extern int centaur_init_cpu(void);
+extern int transmeta_init_cpu(void);
+extern int rise_init_cpu(void);
+void early_cpu_detect(void);
+
+/* Populate cpu_devs[] by calling each vendor's registration function
+ * (32-bit-only vendors are compiled out on x86_64), then run the minimal
+ * early detection pass on the boot CPU. */
+void __init early_cpu_init(void)
+{
+       intel_cpu_init();
+       amd_init_cpu();
+#ifdef CONFIG_X86_32
+       cyrix_init_cpu();
+       nsc_init_cpu();
+       centaur_init_cpu();
+       transmeta_init_cpu();
+       rise_init_cpu();
+#endif
+       early_cpu_detect();
+}
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+ * and IDT. We reload them nevertheless, this function acts as a
+ * 'CPU state barrier', nothing should get across.
+ */
+/* Per-CPU state initialisation: load this CPU's GDT, clear NT, set up and
+ * load the per-CPU TSS/LDT, clear the debug registers, and install the
+ * current page tables. Panics (spins) if called twice for the same CPU. */
+void __init cpu_init (void)
+{
+       int cpu = smp_processor_id();
+       struct tss_struct *t = &init_tss[cpu];
+       char gdt_load[10];
+
+       if (cpu_test_and_set(cpu, cpu_initialized)) {
+               printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
+               for (;;) local_irq_enable();
+       }
+       printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+
+       if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
+               clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+
+       /* Build an lgdt operand on the stack: 16-bit limit + base address. */
+       *(unsigned short *)(&gdt_load[0]) = LAST_RESERVED_GDT_BYTE;
+       *(unsigned long  *)(&gdt_load[2]) = GDT_VIRT_START(current);
+       __asm__ __volatile__ ( "lgdt %0" : "=m" (gdt_load) );
+
+       /* No nested task. */
+       __asm__("pushf ; andw $0xbfff,(%"__OP"sp) ; popf");
+
+       /* Ensure FPU gets initialised for each domain. */
+       stts();
+
+       /* Set up and load the per-CPU TSS and LDT. */
+       t->bitmap = IOBMP_INVALID_OFFSET;
+#if defined(CONFIG_X86_32)
+       t->ss0  = __HYPERVISOR_DS;
+       t->esp0 = get_stack_bottom();
+#elif defined(CONFIG_X86_64)
+       /* Bottom-of-stack must be 16-byte aligned! */
+       BUG_ON((get_stack_bottom() & 15) != 0);
+       t->rsp0 = get_stack_bottom();
+#endif
+       set_tss_desc(cpu,t);
+       load_TR(cpu);
+       __asm__ __volatile__ ( "lldt %%ax" : : "a" (0) );
+
+       /* Clear all 6 debug registers: */
+#define CD(register) __asm__("mov %0,%%db" #register ::"r"(0UL) );
+       CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
+#undef CD
+
+       /* Install correct page table. */
+       write_ptbase(current);
+}
diff --git a/xen/arch/x86/cpu/cpu.h b/xen/arch/x86/cpu/cpu.h
new file mode 100644 (file)
index 0000000..9df38d9
--- /dev/null
@@ -0,0 +1,31 @@
+
+/* One family's worth of model-name strings for table_lookup_model(). */
+struct cpu_model_info {
+       int vendor;
+       int family;
+       char *model_names[16];
+};
+
+/* attempt to consolidate cpu attributes */
+struct cpu_dev {
+       char    * c_vendor;
+
+       /* some have two possibilities for cpuid string */
+       char    * c_ident[2];   
+
+       struct          cpu_model_info c_models[4];
+
+       /* c_init: canonicalize feature flags; c_identify: optional extra
+        * probing (may enable CPUID); c_size_cache: fix raw L2 size. */
+       void            (*c_init)(struct cpuinfo_x86 * c);
+       void            (*c_identify)(struct cpuinfo_x86 * c);
+       unsigned int    (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size);
+};
+
+extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
+
+extern int get_model_name(struct cpuinfo_x86 *c);
+extern void display_cacheinfo(struct cpuinfo_x86 *c);
+
+extern void generic_identify(struct cpuinfo_x86 * c);
+extern int have_cpuid_p(void);
+
+extern void early_intel_workaround(struct cpuinfo_x86 *c);
+
diff --git a/xen/arch/x86/cpu/cyrix.c b/xen/arch/x86/cpu/cyrix.c
new file mode 100644 (file)
index 0000000..6a3b98e
--- /dev/null
@@ -0,0 +1,400 @@
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/irq.h>
+#include <xen/bitops.h>
+#include <xen/delay.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+
+#include "cpu.h"
+
+/*
+ * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
+ */
+/*
+ * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
+ * On chips without DEVID registers, *dir0 is set to a sentinel (0xfd/0xfe)
+ * and *dir1 is left untouched.
+ */
+void __init do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+{
+       unsigned char ccr2, ccr3;
+       unsigned long flags;
+       
+       /* we test for DEVID by checking whether CCR3 is writable */
+       local_irq_save(flags);
+       ccr3 = getCx86(CX86_CCR3);
+       setCx86(CX86_CCR3, ccr3 ^ 0x80);
+       getCx86(0xc0);   /* dummy to change bus */
+
+       if (getCx86(CX86_CCR3) == ccr3) {       /* no DEVID regs. */
+               ccr2 = getCx86(CX86_CCR2);
+               setCx86(CX86_CCR2, ccr2 ^ 0x04);
+               getCx86(0xc0);  /* dummy */
+
+               if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */
+                       *dir0 = 0xfd;
+               else {                          /* Cx486S A step */
+                       setCx86(CX86_CCR2, ccr2);
+                       *dir0 = 0xfe;
+               }
+       }
+       else {
+               setCx86(CX86_CCR3, ccr3);  /* restore CCR3 */
+
+               /* read DIR0 and DIR1 CPU registers */
+               *dir0 = getCx86(CX86_DIR0);
+               *dir1 = getCx86(CX86_DIR1);
+       }
+       local_irq_restore(flags);
+}
+
+/*
+ * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
+ * order to identify the Cyrix CPU model after we're out of setup.c
+ *
+ * Actually since bugs.h doesn't even reference this perhaps someone should
+ * fix the documentation ???
+ */
+static unsigned char Cx86_dir0_msb __initdata = 0;
+
+static char Cx86_model[][9] __initdata = {
+       "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
+       "M II ", "Unknown"
+};
+static char Cx486_name[][5] __initdata = {
+       "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
+       "SRx2", "DRx2"
+};
+static char Cx486S_name[][4] __initdata = {
+       "S", "S2", "Se", "S2e"
+};
+static char Cx486D_name[][4] __initdata = {
+       "DX", "DX2", "?", "?", "?", "DX4"
+};
+static char Cx86_cb[] __initdata = "?.5x Core/Bus Clock";
+static char cyrix_model_mult1[] __initdata = "12??43";
+static char cyrix_model_mult2[] __initdata = "12233445";
+
+/*
+ * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
+ * BIOSes for compatibility with DOS games.  This makes the udelay loop
+ * work correctly, and improves performance.
+ *
+ * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
+ */
+
+/* Clear the SLOP bit in CCR5 on 6x86(L) family parts (Cx86_dir0_msb == 3)
+ * so the udelay calibration loop runs at full speed. Needs MAPEN set in
+ * CCR3 to reach CCR5; restored afterwards. */
+static void __init check_cx686_slop(struct cpuinfo_x86 *c)
+{
+       unsigned long flags;
+       
+       if (Cx86_dir0_msb == 3) {
+               unsigned char ccr3, ccr5;
+
+               local_irq_save(flags);
+               ccr3 = getCx86(CX86_CCR3);
+               setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
+               ccr5 = getCx86(CX86_CCR5);
+               if (ccr5 & 2)
+                       setCx86(CX86_CCR5, ccr5 & 0xfd);  /* reset SLOP */
+               setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
+               local_irq_restore(flags);
+       }
+}
+
+
+/* Enable memory access reordering on Cyrix/NSC: clear the load/store
+ * serialize bit in PCR0, then widen the serialize region via CCR3.
+ * NOTE(review): MAPEN is enabled but never explicitly cleared here —
+ * the final CCR3 write includes the 0xe0 bits; confirm against datasheet. */
+static void __init set_cx86_reorder(void)
+{
+       u8 ccr3;
+
+       printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC processor.\n");
+       ccr3 = getCx86(CX86_CCR3);
+       setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
+
+       /* Load/Store Serialize to mem access disable (=reorder it)  */
+       setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
+       /* set load/store serialize from 1GB to 4GB */
+       ccr3 |= 0xe0;
+       setCx86(CX86_CCR3, ccr3);
+}
+
+/* Enable write-back cache mode on Cyrix/NSC: unlock the NW bit via CCR2,
+ * set CR0.NW (bit 29), then re-lock CCR2 with WT1 set. */
+static void __init set_cx86_memwb(void)
+{
+       u32 cr0;
+
+       printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
+
+       /* CCR2 bit 2: unlock NW bit */
+       setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
+       /* set 'Not Write-through' */
+       cr0 = 0x20000000;
+       __asm__("movl %%cr0,%%eax\n\t"
+               "orl %0,%%eax\n\t"
+               "movl %%eax,%%cr0\n"
+               : : "r" (cr0)
+               :"ax");
+       /* CCR2 bit 2: lock NW bit and set WT1 */
+       setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14 );
+}
+
+/* Enable the "Incrementor" performance feature via PCR0/PCR1, which are
+ * only reachable with MAPEN set in CCR3 (restored on exit). */
+static void __init set_cx86_inc(void)
+{
+       unsigned char ccr3;
+
+       printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n");
+
+       ccr3 = getCx86(CX86_CCR3);
+       setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
+       /* PCR1 -- Performance Control */
+       /* Incrementor on, whatever that is */
+       setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
+       /* PCR0 -- Performance Control */
+       /* Incrementor Margin 10 */
+       setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04); 
+       setCx86(CX86_CCR3, ccr3);       /* disable MAPEN */
+}
+
+/*
+ *     Configure later MediaGX and/or Geode processor.
+ */
+
+/* Configure later MediaGX and/or Geode processor: suspend-on-halt, then
+ * write-back cache, access reordering and the incrementor.
+ * NOTE(review): ccr4 is read and OR'd with 0x38 but never written back
+ * via setCx86(CX86_CCR4, ...) — looks like a dropped store inherited from
+ * the Linux port; confirm against the GX1 datasheet before relying on
+ * FPU-fast/DTE-cache/mem-bypass being enabled. */
+static void __init geode_configure(void)
+{
+       unsigned long flags;
+       u8 ccr3, ccr4;
+       local_irq_save(flags);
+
+       /* Suspend on halt power saving and enable #SUSP pin */
+       setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
+
+       ccr3 = getCx86(CX86_CCR3);
+       setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);       /* Enable */
+       
+       ccr4 = getCx86(CX86_CCR4);
+       ccr4 |= 0x38;           /* FPU fast, DTE cache, Mem bypass */
+       
+       setCx86(CX86_CCR3, ccr3);
+       
+       set_cx86_memwb();
+       set_cx86_reorder();     
+       set_cx86_inc();
+       
+       local_irq_restore(flags);
+}
+
+
+/* Vendor c_init for Cyrix and NSC parts: decode the DEVID (DIR0/DIR1)
+ * registers into family/model/stepping and a human-readable model name,
+ * and apply per-generation quirks (coma bug flag, ARR-as-MTRR, MMX
+ * enabling, Geode configuration). */
+static void __init init_cyrix(struct cpuinfo_x86 *c)
+{
+       unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
+       char *buf = c->x86_model_id;
+       const char *p = NULL;
+
+       /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+          3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
+       clear_bit(0*32+31, c->x86_capability);
+
+       /* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
+       if ( test_bit(1*32+24, c->x86_capability) ) {
+               clear_bit(1*32+24, c->x86_capability);
+               set_bit(X86_FEATURE_CXMMX, c->x86_capability);
+       }
+
+       do_cyrix_devid(&dir0, &dir1);
+
+       check_cx686_slop(c);
+
+       Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family"   */
+       dir0_lsn = dir0 & 0xf;                /* model or clock multiplier */
+
+       /* common case step number/rev -- exceptions handled below */
+       c->x86_model = (dir1 >> 4) + 1;
+       c->x86_mask = dir1 & 0xf;
+
+       /* Now cook; the original recipe is by Channing Corn, from Cyrix.
+        * We do the same thing for each generation: we work out
+        * the model, multiplier and stepping.  Black magic included,
+        * to make the silicon step/rev numbers match the printed ones.
+        */
+        
+       switch (dir0_msn) {
+               unsigned char tmp;
+
+       case 0: /* Cx486SLC/DLC/SRx/DRx */
+               p = Cx486_name[dir0_lsn & 7];
+               break;
+
+       case 1: /* Cx486S/DX/DX2/DX4 */
+               p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
+                       : Cx486S_name[dir0_lsn & 3];
+               break;
+
+       case 2: /* 5x86 */
+               /* Patch the multiplier digit into the "?.5x Core/Bus" string. */
+               Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
+               p = Cx86_cb+2;
+               break;
+
+       case 3: /* 6x86/6x86L */
+               Cx86_cb[1] = ' ';
+               Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
+               if (dir1 > 0x21) { /* 686L */
+                       Cx86_cb[0] = 'L';
+                       p = Cx86_cb;
+                       (c->x86_model)++;
+               } else             /* 686 */
+                       p = Cx86_cb+1;
+               /* Emulate MTRRs using Cyrix's ARRs. */
+               set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
+               /* 6x86's contain this bug */
+               c->coma_bug = 1;
+               break;
+
+       case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
+               c->x86_cache_size=16;   /* Yep 16K integrated cache thats it */
+               /* GXm supports extended cpuid levels 'ala' AMD */
+               if (c->cpuid_level == 2) {
+                       /* Enable cxMMX extensions (GX1 Datasheet 54) */
+                       setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
+                       
+                       /* GXlv/GXm/GX1 */
+                       if((dir1 >= 0x50 && dir1 <= 0x54) || dir1 >= 0x63)
+                               geode_configure();
+                       get_model_name(c);  /* get CPU marketing name */
+                       return;
+               }
+               else {  /* MediaGX */
+                       Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
+                       p = Cx86_cb+2;
+                       c->x86_model = (dir1 & 0x20) ? 1 : 2;
+               }
+               break;
+
+        case 5: /* 6x86MX/M II */
+               if (dir1 > 7)
+               {
+                       dir0_msn++;  /* M II */
+                       /* Enable MMX extensions (App note 108) */
+                       setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
+               }
+               else
+               {
+                       c->coma_bug = 1;      /* 6x86MX, it has the bug. */
+               }
+               tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
+               Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
+               p = Cx86_cb+tmp;
+               if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
+                       (c->x86_model)++;
+               /* Emulate MTRRs using Cyrix's ARRs. */
+               set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
+               break;
+
+       case 0xf:  /* Cyrix 486 without DEVID registers */
+               switch (dir0_lsn) {
+               case 0xd:  /* either a 486SLC or DLC w/o DEVID */
+                       dir0_msn = 0;
+                       p = Cx486_name[(c->hard_math) ? 1 : 0];
+                       break;
+
+               case 0xe:  /* a 486S A step */
+                       dir0_msn = 0;
+                       p = Cx486S_name[0];
+                       break;
+               }
+               break;
+
+       default:  /* unknown (shouldn't happen, we know everyone ;-) */
+               dir0_msn = 7;
+               break;
+       }
+       /* Assemble "<family name><suffix>" into c->x86_model_id. */
+       strcpy(buf, Cx86_model[dir0_msn & 7]);
+       if (p) strcat(buf, p);
+       return;
+}
+
+/*
+ * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
+ * by the fact that they preserve the flags across the division of 5/2.
+ * PII and PPro exhibit this behavior too, but they have cpuid available.
+ */
+/*
+ * Perform the Cyrix 5/2 test. A Cyrix won't change
+ * the flags, while other 486 chips will.
+ */
+/*
+ * Perform the Cyrix 5/2 test. A Cyrix won't change
+ * the flags, while other 486 chips will.
+ * Returns nonzero when the preserved flags (AH after lahf) identify a
+ * Cyrix part with CPUID disabled.
+ */
+static inline int test_cyrix_52div(void)
+{
+       unsigned int test;
+
+       __asm__ __volatile__(
+            "sahf\n\t"         /* clear flags (%eax = 0x0005) */
+            "div %b2\n\t"      /* divide 5 by 2 */
+            "lahf"             /* store flags into %ah */
+            : "=a" (test)
+            : "0" (5), "q" (2)
+            : "cc");
+
+       /* AH is 0x02 on Cyrix after the divide.. */
+       return (unsigned char) (test >> 8) == 0x02;
+}
+
+/* Vendor c_identify hook: recognise older Cyrix chips that ship with
+ * CPUID disabled (via the 5/2 divide test), enable CPUID through CCR4 on
+ * affected models, then fall through to generic identification. */
+static void cyrix_identify(struct cpuinfo_x86 * c)
+{
+       /* Detect Cyrix with disabled CPUID */
+       if ( c->x86 == 4 && test_cyrix_52div() ) {
+               unsigned char dir0, dir1;
+               
+               strcpy(c->x86_vendor_id, "CyrixInstead");
+               c->x86_vendor = X86_VENDOR_CYRIX;
+               
+               /* Actually enable cpuid on the older cyrix */
+           
+               /* Retrieve CPU revisions */
+               
+               do_cyrix_devid(&dir0, &dir1);
+
+               dir0>>=4;               
+               
+               /* Check it is an affected model */
+               
+               if (dir0 == 5 || dir0 == 3)
+               {
+                       unsigned char ccr3, ccr4;
+                       unsigned long flags;
+                       printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
+                       local_irq_save(flags);
+                       ccr3 = getCx86(CX86_CCR3);
+                       setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
+                       ccr4 = getCx86(CX86_CCR4);
+                       setCx86(CX86_CCR4, ccr4 | 0x80);          /* enable cpuid  */
+                       setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
+                       local_irq_restore(flags);
+               }
+       }
+       generic_identify(c);
+}
+
+/* Cyrix vendor descriptor and registration into cpu_devs[]. */
+static struct cpu_dev cyrix_cpu_dev __initdata = {
+       .c_vendor       = "Cyrix",
+       .c_ident        = { "CyrixInstead" },
+       .c_init         = init_cyrix,
+       .c_identify     = cyrix_identify,
+};
+
+int __init cyrix_init_cpu(void)
+{
+       cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev;
+       return 0;
+}
+
+/* Linux used an initcall here; Xen calls cyrix_init_cpu() directly. */
+//early_arch_initcall(cyrix_init_cpu);
+
+/* NSC Geode shares the Cyrix init path but uses generic identification. */
+static struct cpu_dev nsc_cpu_dev __initdata = {
+       .c_vendor       = "NSC",
+       .c_ident        = { "Geode by NSC" },
+       .c_init         = init_cyrix,
+       .c_identify     = generic_identify,
+};
+
+int __init nsc_init_cpu(void)
+{
+       cpu_devs[X86_VENDOR_NSC] = &nsc_cpu_dev;
+       return 0;
+}
+
+//early_arch_initcall(nsc_init_cpu);
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
new file mode 100644 (file)
index 0000000..58826cb
--- /dev/null
@@ -0,0 +1,244 @@
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/kernel.h>
+#include <xen/string.h>
+#include <xen/bitops.h>
+#include <xen/smp.h>
+#include <asm/processor.h>
+#include <asm/msr.h>
+#include <asm/uaccess.h>
+#include <asm/mpspec.h>
+#include <asm/apic.h>
+#include <mach_apic.h>
+
+#include "cpu.h"
+
+#define select_idle_routine(x) ((void)0)
+
+extern int trap_init_f00f_bug(void);
+
+#ifdef CONFIG_X86_INTEL_USERCOPY
+/*
+ * Alignment at which movsl is preferred for bulk memory copies.
+ */
+struct movsl_mask movsl_mask;
+#endif
+
+/*
+ * Early (pre-identify) fixups for Intel CPUs.  Called for every CPU;
+ * returns immediately for non-Intel vendors.
+ */
+void __init early_intel_workaround(struct cpuinfo_x86 *c)
+{
+       if (c->x86_vendor != X86_VENDOR_INTEL)
+               return;
+       /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
+       if (c->x86 == 15 && c->x86_cache_alignment == 64)
+               c->x86_cache_alignment = 128;
+}
+
+/*
+ *     Early probe support logic for ppro memory erratum #50
+ *
+ *     This is called before we do cpu ident work
+ */
+int __init ppro_with_ram_bug(void)
+{
+       /* Uses data from early_cpu_detect now */
+       /* Affected: Pentium Pro (family 6, model 1) steppings below 8 */
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+           boot_cpu_data.x86 == 6 &&
+           boot_cpu_data.x86_model == 1 &&
+           boot_cpu_data.x86_mask < 8) {
+               printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
+               return 1;
+       }
+       return 0;
+}
+       
+
+/*
+ * P4 Xeon errata 037 workaround.
+ * Hardware prefetcher may cause stale data to be loaded into the cache.
+ */
+static void __init Intel_errata_workarounds(struct cpuinfo_x86 *c)
+{
+       unsigned long lo, hi;
+
+       /* Family 15, model 1, stepping 1: the C0-stepping P4 Xeon */
+       if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
+               rdmsr (MSR_IA32_MISC_ENABLE, lo, hi);
+               if ((lo & (1<<9)) == 0) {
+                       printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
+                       printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
+                       lo |= (1<<9);   /* Disable hw prefetching */
+                       wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
+               }
+       }
+}
+
+
+/*
+ * Vendor init for Intel CPUs: F00F workaround, cache size detection,
+ * the Pentium Pro SEP erratum, Celeron/Pentium II naming by cache
+ * size, hyperthreading detection and the P4 Xeon prefetch erratum.
+ */
+static void __init init_intel(struct cpuinfo_x86 *c)
+{
+       unsigned int l2 = 0;
+       char *p = NULL;
+
+#ifdef CONFIG_X86_F00F_BUG
+       /*
+        * All current models of Pentium and Pentium with MMX technology CPUs
+        * have the F0 0F bug, which lets nonprivileged users lock up the system.
+        * Note that the workaround only should be initialized once...
+        */
+       c->f00f_bug = 0;
+       if ( c->x86 == 5 ) {
+               static int f00f_workaround_enabled = 0;
+
+               c->f00f_bug = 1;
+               if ( !f00f_workaround_enabled ) {
+                       trap_init_f00f_bug();
+                       printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
+                       f00f_workaround_enabled = 1;
+               }
+       }
+#endif
+
+       select_idle_routine(c);
+       l2 = init_intel_cacheinfo(c);
+
+       /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
+       if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
+               clear_bit(X86_FEATURE_SEP, c->x86_capability);
+
+       /* Names for the Pentium II/Celeron processors 
+          detectable only by also checking the cache size.
+          Dixon is NOT a Celeron. */
+       if (c->x86 == 6) {
+               switch (c->x86_model) {
+               case 5:
+                       if (c->x86_mask == 0) {
+                               if (l2 == 0)
+                                       p = "Celeron (Covington)";
+                               else if (l2 == 256)
+                                       p = "Mobile Pentium II (Dixon)";
+                       }
+                       break;
+                       
+               case 6:
+                       if (l2 == 128)
+                               p = "Celeron (Mendocino)";
+                       else if (c->x86_mask == 0 || c->x86_mask == 5)
+                               p = "Celeron-A";
+                       break;
+                       
+               case 8:
+                       if (l2 == 128)
+                               p = "Celeron (Coppermine)";
+                       break;
+               }
+       }
+
+       if ( p )
+               strcpy(c->x86_model_id, p);
+       
+       detect_ht(c);
+
+       /* Work around errata */
+       Intel_errata_workarounds(c);
+
+#ifdef CONFIG_X86_INTEL_USERCOPY
+       /*
+        * Set up the preferred alignment for movsl bulk memory moves
+        */
+       switch (c->x86) {
+       case 4:         /* 486: untested */
+               break;
+       case 5:         /* Old Pentia: untested */
+               break;
+       case 6:         /* PII/PIII only like movsl with 8-byte alignment */
+               movsl_mask.mask = 7;
+               break;
+       case 15:        /* P4 is OK down to 8-byte alignment */
+               movsl_mask.mask = 7;
+               break;
+       }
+#endif
+
+       /* Synthetic flags used elsewhere to distinguish P6 vs Netburst cores */
+       if (c->x86 == 15) 
+               set_bit(X86_FEATURE_P4, c->x86_capability);
+       if (c->x86 == 6) 
+               set_bit(X86_FEATURE_P3, c->x86_capability);
+}
+
+
+/* Vendor hook: fix up the reported L2 size where CPUID cannot tell. */
+static unsigned int intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
+{
+       /* Intel PIII Tualatin. This comes in two flavours.
+        * One has 256kb of cache, the other 512. We have no way
+        * to determine which, so we use a boottime override
+        * for the 512kb model, and assume 256 otherwise.
+        */
+       if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
+               size = 256;
+       return size;
+}
+
+/* Vendor driver for Intel: per-family model name tables plus the
+ * init/identify/size-cache hooks defined above. */
+static struct cpu_dev intel_cpu_dev __initdata = {
+       .c_vendor       = "Intel",
+       .c_ident        = { "GenuineIntel" },
+       .c_models = {
+               { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = 
+                 { 
+                         [0] = "486 DX-25/33", 
+                         [1] = "486 DX-50", 
+                         [2] = "486 SX", 
+                         [3] = "486 DX/2", 
+                         [4] = "486 SL", 
+                         [5] = "486 SX/2", 
+                         [7] = "486 DX/2-WB", 
+                         [8] = "486 DX/4", 
+                         [9] = "486 DX/4-WB"
+                 }
+               },
+               { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
+                 { 
+                         [0] = "Pentium 60/66 A-step", 
+                         [1] = "Pentium 60/66", 
+                         [2] = "Pentium 75 - 200",
+                         [3] = "OverDrive PODP5V83", 
+                         [4] = "Pentium MMX",
+                         [7] = "Mobile Pentium 75 - 200", 
+                         [8] = "Mobile Pentium MMX"
+                 }
+               },
+               { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
+                 { 
+                         [0] = "Pentium Pro A-step",
+                         [1] = "Pentium Pro", 
+                         [3] = "Pentium II (Klamath)", 
+                         [4] = "Pentium II (Deschutes)", 
+                         [5] = "Pentium II (Deschutes)", 
+                         [6] = "Mobile Pentium II",
+                         [7] = "Pentium III (Katmai)", 
+                         [8] = "Pentium III (Coppermine)", 
+                         [10] = "Pentium III (Cascades)",
+                         [11] = "Pentium III (Tualatin)",
+                 }
+               },
+               { .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
+                 {
+                         [0] = "Pentium 4 (Unknown)",
+                         [1] = "Pentium 4 (Willamette)",
+                         [2] = "Pentium 4 (Northwood)",
+                         [4] = "Pentium 4 (Foster)",
+                         [5] = "Pentium 4 (Foster)",
+                 }
+               },
+       },
+       .c_init         = init_intel,
+       .c_identify     = generic_identify,
+       .c_size_cache   = intel_size_cache,
+};
+
+/* Register the Intel driver in the vendor dispatch table. */
+__init int intel_cpu_init(void)
+{
+       cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
+       return 0;
+}
+
+// arch_initcall(intel_cpu_init);
+
diff --git a/xen/arch/x86/cpu/intel_cacheinfo.c b/xen/arch/x86/cpu/intel_cacheinfo.c
new file mode 100644 (file)
index 0000000..f309467
--- /dev/null
@@ -0,0 +1,142 @@
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <asm/processor.h>
+
+/* Cache level/type codes used in cache_table[] below. */
+#define LVL_1_INST     1
+#define LVL_1_DATA     2
+#define LVL_2          3
+#define LVL_3          4
+#define LVL_TRACE      5
+
+/* One CPUID leaf 2 cache descriptor: level/type code and size in KB. */
+struct _cache_table
+{
+       unsigned char descriptor;
+       char cache_type;
+       short size;
+};
+
+/* all the cache descriptor types we care about (no TLB or trace cache entries) */
+static struct _cache_table cache_table[] __initdata =
+{
+       { 0x06, LVL_1_INST, 8 },        /* 4-way set assoc, 32 byte line size */
+       { 0x08, LVL_1_INST, 16 },       /* 4-way set assoc, 32 byte line size */
+       { 0x0a, LVL_1_DATA, 8 },        /* 2 way set assoc, 32 byte line size */
+       { 0x0c, LVL_1_DATA, 16 },       /* 4-way set assoc, 32 byte line size */
+       { 0x22, LVL_3,      512 },      /* 4-way set assoc, sectored cache, 64 byte line size */
+       { 0x23, LVL_3,      1024 },     /* 8-way set assoc, sectored cache, 64 byte line size */
+       { 0x25, LVL_3,      2048 },     /* 8-way set assoc, sectored cache, 64 byte line size */
+       { 0x29, LVL_3,      4096 },     /* 8-way set assoc, sectored cache, 64 byte line size */
+       { 0x2c, LVL_1_DATA, 32 },       /* 8-way set assoc, 64 byte line size */
+       { 0x30, LVL_1_INST, 32 },       /* 8-way set assoc, 64 byte line size */
+       { 0x39, LVL_2,      128 },      /* 4-way set assoc, sectored cache, 64 byte line size */
+       { 0x3b, LVL_2,      128 },      /* 2-way set assoc, sectored cache, 64 byte line size */
+       { 0x3c, LVL_2,      256 },      /* 4-way set assoc, sectored cache, 64 byte line size */
+       { 0x41, LVL_2,      128 },      /* 4-way set assoc, 32 byte line size */
+       { 0x42, LVL_2,      256 },      /* 4-way set assoc, 32 byte line size */
+       { 0x43, LVL_2,      512 },      /* 4-way set assoc, 32 byte line size */
+       { 0x44, LVL_2,      1024 },     /* 4-way set assoc, 32 byte line size */
+       { 0x45, LVL_2,      2048 },     /* 4-way set assoc, 32 byte line size */
+       { 0x60, LVL_1_DATA, 16 },       /* 8-way set assoc, sectored cache, 64 byte line size */
+       { 0x66, LVL_1_DATA, 8 },        /* 4-way set assoc, sectored cache, 64 byte line size */
+       { 0x67, LVL_1_DATA, 16 },       /* 4-way set assoc, sectored cache, 64 byte line size */
+       { 0x68, LVL_1_DATA, 32 },       /* 4-way set assoc, sectored cache, 64 byte line size */
+       { 0x70, LVL_TRACE,  12 },       /* 8-way set assoc */
+       { 0x71, LVL_TRACE,  16 },       /* 8-way set assoc */
+       { 0x72, LVL_TRACE,  32 },       /* 8-way set assoc */
+       { 0x78, LVL_2,    1024 },       /* 4-way set assoc, 64 byte line size */
+       { 0x79, LVL_2,     128 },       /* 8-way set assoc, sectored cache, 64 byte line size */
+       { 0x7a, LVL_2,     256 },       /* 8-way set assoc, sectored cache, 64 byte line size */
+       { 0x7b, LVL_2,     512 },       /* 8-way set assoc, sectored cache, 64 byte line size */
+       { 0x7c, LVL_2,    1024 },       /* 8-way set assoc, sectored cache, 64 byte line size */
+       { 0x7d, LVL_2,    2048 },       /* 8-way set assoc, 64 byte line size */
+       { 0x7f, LVL_2,     512 },       /* 2-way set assoc, 64 byte line size */
+       { 0x82, LVL_2,     256 },       /* 8-way set assoc, 32 byte line size */
+       { 0x83, LVL_2,     512 },       /* 8-way set assoc, 32 byte line size */
+       { 0x84, LVL_2,    1024 },       /* 8-way set assoc, 32 byte line size */
+       { 0x85, LVL_2,    2048 },       /* 8-way set assoc, 32 byte line size */
+       { 0x86, LVL_2,     512 },       /* 4-way set assoc, 64 byte line size */
+       { 0x87, LVL_2,    1024 },       /* 8-way set assoc, 64 byte line size */
+       { 0x00, 0, 0}                   /* table terminator */
+};
+
+/*
+ * Decode the CPUID leaf 2 cache descriptors, accumulate per-level
+ * cache sizes (KB), print them, and set c->x86_cache_size.
+ * Returns the total L2 size found (0 if none).
+ */
+unsigned int __init init_intel_cacheinfo(struct cpuinfo_x86 *c)
+{
+       unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
+
+       if (c->cpuid_level > 1) {
+               /* supports eax=2  call */
+               int i, j, n;
+               int regs[4];
+               unsigned char *dp = (unsigned char *)regs;
+
+               /* Number of times to iterate */
+               n = cpuid_eax(2) & 0xFF;
+
+               for ( i = 0 ; i < n ; i++ ) {
+                       cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
+
+                       /* If bit 31 is set, this is an unknown format */
+                       for ( j = 0 ; j < 3 ; j++ ) {
+                               if ( regs[j] < 0 ) regs[j] = 0;
+                       }
+
+                       /* Byte 0 is level count, not a descriptor */
+                       for ( j = 1 ; j < 16 ; j++ ) {
+                               unsigned char des = dp[j];
+                               unsigned char k = 0;
+
+                               /* look up this descriptor in the table */
+                               while (cache_table[k].descriptor != 0)
+                               {
+                                       if (cache_table[k].descriptor == des) {
+                                               switch (cache_table[k].cache_type) {
+                                               case LVL_1_INST:
+                                                       l1i += cache_table[k].size;
+                                                       break;
+                                               case LVL_1_DATA:
+                                                       l1d += cache_table[k].size;
+                                                       break;
+                                               case LVL_2:
+                                                       l2 += cache_table[k].size;
+                                                       break;
+                                               case LVL_3:
+                                                       l3 += cache_table[k].size;
+                                                       break;
+                                               case LVL_TRACE:
+                                                       trace += cache_table[k].size;
+                                                       break;
+                                               }
+
+                                               break;
+                                       }
+
+                                       k++;
+                               }
+                       }
+               }
+
+               if ( trace )
+                       printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
+               else if ( l1i )
+                       printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
+               if ( l1d )
+                       printk(", L1 D cache: %dK\n", l1d);
+               else
+                       printk("\n");
+               if ( l2 )
+                       printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
+               if ( l3 )
+                       printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
+
+               /*
+                * This assumes the L3 cache is shared; it typically lives in
+                * the northbridge.  The L1 caches are included by the L2
+                * cache, and so should not be included for the purpose of
+                * SMP switching weights.
+                */
+               c->x86_cache_size = l2 ? l2 : (l1i+l1d);
+       }
+
+       return l2;
+}
diff --git a/xen/arch/x86/cpu/rise.c b/xen/arch/x86/cpu/rise.c
new file mode 100644 (file)
index 0000000..3b1b0f4
--- /dev/null
@@ -0,0 +1,54 @@
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/init.h>
+#include <xen/bitops.h>
+#include <asm/processor.h>
+
+#include "cpu.h"
+
+/* Rise iDragon (mP6) setup: print the model and unhide feature flags.
+ * These parts have no MSRs, so the unlock uses magic CPUID sequences. */
+static void __init init_rise(struct cpuinfo_x86 *c)
+{
+       printk("CPU: Rise iDragon");
+       if (c->x86_model > 2)
+               printk(" II");
+       printk("\n");
+
+       /* Unhide possibly hidden capability flags
+          The mp6 iDragon family don't have MSRs.
+          We switch on extra features with this cpuid weirdness: */
+       __asm__ (
+               "movl $0x6363452a, %%eax\n\t"
+               "movl $0x3231206c, %%ecx\n\t"
+               "movl $0x2a32313a, %%edx\n\t"
+               "cpuid\n\t"
+               "movl $0x63634523, %%eax\n\t"
+               "movl $0x32315f6c, %%ecx\n\t"
+               "movl $0x2333313a, %%edx\n\t"
+               "cpuid\n\t" : : : "eax", "ebx", "ecx", "edx"
+       );
+       /* CMPXCHG8B works but is not reported by CPUID; set it by hand. */
+       set_bit(X86_FEATURE_CX8, c->x86_capability);
+}
+
+/* Vendor driver hooks for Rise processors. */
+static struct cpu_dev rise_cpu_dev __initdata = {
+       .c_vendor       = "Rise",
+       .c_ident        = { "RiseRiseRise" },
+       .c_models = {
+               { .vendor = X86_VENDOR_RISE, .family = 5, .model_names = 
+                 { 
+                         [0] = "iDragon", 
+                         [2] = "iDragon", 
+                         [8] = "iDragon II", 
+                         [9] = "iDragon II"
+                 }
+               },
+       },
+       .c_init         = init_rise,
+};
+
+/* Register the Rise driver in the vendor dispatch table. */
+int __init rise_init_cpu(void)
+{
+       cpu_devs[X86_VENDOR_RISE] = &rise_cpu_dev;
+       return 0;
+}
+
+//early_arch_initcall(rise_init_cpu);
diff --git a/xen/arch/x86/cpu/transmeta.c b/xen/arch/x86/cpu/transmeta.c
new file mode 100644 (file)
index 0000000..c296006
--- /dev/null
@@ -0,0 +1,108 @@
+#include <xen/config.h>
+#include <xen/lib.h>
+#include <xen/init.h>
+#include <asm/processor.h>
+#include <asm/msr.h>
+#include "cpu.h"
+
+static void __init init_transmeta(struct cpuinfo_x86 *c)
+{
+       unsigned int cap_mask, uk, max, dummy;
+       unsigned int cms_rev1, cms_rev2;
+       unsigned int cpu_rev, cpu_freq, cpu_flags, new_cpu_rev;
+       char cpu_info[65];
+
+       get_model_name(c);      /* Same as AMD/Cyrix */
+       display_cacheinfo(c);
+
+       /* Print CMS and CPU revision */
+       max = cpuid_eax(0x80860000);
+       cpu_rev = 0;
+       if ( max >= 0x80860001 ) {
+               cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags); 
+               if (cpu_rev != 0x02000000) {
+                       printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
+                               (cpu_rev >> 24) & 0xff,
+                               (cpu_rev >> 16) & 0xff,
+                               (cpu_rev >> 8) & 0xff,
+                               cpu_rev & 0xff,
+                               cpu_freq);
+               }
+       }
+       if ( max >= 0x80860002 ) {
+               cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy);
+               if (cpu_rev == 0x02000000) {
+                       printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n",
+                               new_cpu_rev, cpu_freq);
+               }
+               printk(KERN_INFO "CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
+                      (cms_rev1 >> 24) & 0xff,
+                      (cms_rev1 >> 16) & 0xff,
+                      (cms_rev1 >> 8) & 0xff,
+                      cms_rev1 & 0xff,
+                      cms_rev2);
+       }
+       if ( max >= 0x80860006 ) {
+               cpuid(0x80860003,
+                     (void *)&cpu_info[0],
+                     (void *)&cpu_info[4],
+                     (void *)&cpu_info[8],
+                     (void *)&cpu_info[12]);
+               cpuid(0x80860004,
+                     (void *)&cpu_info[16],
+                     (void *)&cpu_info[20],
+                     (void *)&cpu_info[24],
+                     (void *)&cpu_info[28]);
+               cpuid(0x80860005,
+                     (void *)&cpu_info[32],
+                     (void *)&cpu_info[36],
+                     (void *)&cpu_info[40],
+                     (void *)&cpu_info[44]);
+               cpuid(0x80860006,
+                     (void *)&cpu_info[48],
+                     (void *)&cpu_info[52],
+                     (void *)&cpu_info[56],
+                     (void *)&cpu_info[60]);
+               cpu_info[64] = '\0';
+               printk(KERN_INFO "CPU: %s\n", cpu_info);
+       }
+
+       /* Unhide possibly hidden capability flags */
+       rdmsr(0x80860004, cap_mask, uk);
+       wrmsr(0x80860004, ~0, uk);
+       c->x86_capability[0] = cpuid_edx(0x00000001);
+       wrmsr(0x80860004, cap_mask, uk);
+       
+       /* If we can run i686 user-space code, call us an i686 */
+#define USER686 (X86_FEATURE_TSC|X86_FEATURE_CX8|X86_FEATURE_CMOV)
+        if ( c->x86 == 5 && (c->x86_capability[0] & USER686) == USER686 )
+               c->x86 = 6;
+}
+
+/* Read the Transmeta-defined feature flags (leaf 0x80860001 edx) into
+ * capability word 2 after generic identification has run. */
+static void transmeta_identify(struct cpuinfo_x86 * c)
+{
+       u32 xlvl;
+       generic_identify(c);
+
+       /* Transmeta-defined flags: level 0x80860001 */
+       xlvl = cpuid_eax(0x80860000);
+       if ( (xlvl & 0xffff0000) == 0x80860000 ) {
+               if (  xlvl >= 0x80860001 )
+                       c->x86_capability[2] = cpuid_edx(0x80860001);
+       }
+}
+
+/* Vendor driver hooks for Transmeta processors. */
+static struct cpu_dev transmeta_cpu_dev __initdata = {
+       .c_vendor       = "Transmeta",
+       .c_ident        = { "GenuineTMx86", "TransmetaCPU" },
+       .c_init         = init_transmeta,
+       .c_identify     = transmeta_identify,
+};
+
+/* Register the Transmeta driver in the vendor dispatch table. */
+int __init transmeta_init_cpu(void)
+{
+       cpu_devs[X86_VENDOR_TRANSMETA] = &transmeta_cpu_dev;
+       return 0;
+}
+
+//early_arch_initcall(transmeta_init_cpu);
index 0751594777d5d57f9aade0a7eead2617af37b6a2..078d59d8fd7072ac6ebd62a05af2b75fc9d1e59b 100644 (file)
@@ -82,7 +82,8 @@ extern void init_IRQ(void);
 extern void trap_init(void);
 extern void time_init(void);
 extern void ac_timer_init(void);
-extern void initialize_keytable();
+extern void initialize_keytable(void);
+extern void early_cpu_init(void);
 
 extern unsigned long cpu0_stack[];
 
@@ -101,256 +102,6 @@ int acpi_disabled;
 
 int logical_proc_id[NR_CPUS];
 
-/* Standard macro to see if a specific flag is changeable. */
-static inline int flag_is_changeable_p(unsigned long flag)
-{
-    unsigned long f1, f2;
-
-    asm("pushf\n\t"
-        "pushf\n\t"
-        "pop %0\n\t"
-        "mov %0,%1\n\t"
-        "xor %2,%0\n\t"
-        "push %0\n\t"
-        "popf\n\t"
-        "pushf\n\t"
-        "pop %0\n\t"
-        "popf\n\t"
-        : "=&r" (f1), "=&r" (f2)
-        : "ir" (flag));
-
-    return ((f1^f2) & flag) != 0;
-}
-
-/* Probe for the CPUID instruction */
-static int __init have_cpuid_p(void)
-{
-    return flag_is_changeable_p(X86_EFLAGS_ID);
-}
-
-void __init get_cpu_vendor(struct cpuinfo_x86 *c)
-{
-    char *v = c->x86_vendor_id;
-
-    if (!strcmp(v, "GenuineIntel"))
-        c->x86_vendor = X86_VENDOR_INTEL;
-    else if (!strcmp(v, "AuthenticAMD"))
-        c->x86_vendor = X86_VENDOR_AMD;
-    else if (!strcmp(v, "CyrixInstead"))
-        c->x86_vendor = X86_VENDOR_CYRIX;
-    else if (!strcmp(v, "UMC UMC UMC "))
-        c->x86_vendor = X86_VENDOR_UMC;
-    else if (!strcmp(v, "CentaurHauls"))
-        c->x86_vendor = X86_VENDOR_CENTAUR;
-    else if (!strcmp(v, "NexGenDriven"))
-        c->x86_vendor = X86_VENDOR_NEXGEN;
-    else if (!strcmp(v, "RiseRiseRise"))
-        c->x86_vendor = X86_VENDOR_RISE;
-    else if (!strcmp(v, "GenuineTMx86") ||
-             !strcmp(v, "TransmetaCPU"))
-        c->x86_vendor = X86_VENDOR_TRANSMETA;
-    else
-        c->x86_vendor = X86_VENDOR_UNKNOWN;
-}
-
-static void __init init_intel(struct cpuinfo_x86 *c)
-{
-    /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
-    if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
-        clear_bit(X86_FEATURE_SEP, &c->x86_capability);
-
-    if ( test_bit(X86_FEATURE_HT, &c->x86_capability) )
-    {
-        u32     eax, ebx, ecx, edx;
-        int     initial_apic_id, siblings, cpu = smp_processor_id();
-
-        cpuid(1, &eax, &ebx, &ecx, &edx);
-        ht_per_core = siblings = (ebx & 0xff0000) >> 16;
-
-        if ( opt_noht )
-            clear_bit(X86_FEATURE_HT, &c->x86_capability[0]);
-
-        if ( siblings <= 1 )
-        {
-            printk(KERN_INFO  "CPU#%d: Hyper-Threading is disabled\n", cpu);
-        } 
-        else if ( siblings > 2 )
-        {
-            panic("We don't support more than two logical CPUs per package!");
-        }
-        else
-        {
-            initial_apic_id = ebx >> 24 & 0xff;
-            phys_proc_id[cpu]    = initial_apic_id >> 1;
-            logical_proc_id[cpu] = initial_apic_id & 1;
-            printk(KERN_INFO  "CPU#%d: Physical ID: %d, Logical ID: %d\n",
-                   cpu, phys_proc_id[cpu], logical_proc_id[cpu]);
-        }
-    }
-
-#ifdef CONFIG_VMX
-    start_vmx();
-#endif
-
-}
-
-static void __init init_amd(struct cpuinfo_x86 *c)
-{
-    /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-       3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-    clear_bit(0*32+31, &c->x86_capability);
-       
-    switch(c->x86)
-    {
-    case 5:
-        panic("AMD K6 is not supported.\n");
-    case 6:    /* An Athlon/Duron. We can trust the BIOS probably */
-        break;         
-    }
-}
-
-/*
- * This does the hard work of actually picking apart the CPU stuff...
- */
-void __init identify_cpu(struct cpuinfo_x86 *c)
-{
-    int i, cpu = smp_processor_id();
-    u32 xlvl, tfms, junk;
-
-    phys_proc_id[cpu]    = cpu;
-    logical_proc_id[cpu] = 0;
-
-    c->x86_vendor = X86_VENDOR_UNKNOWN;
-    c->cpuid_level = -1;       /* CPUID not detected */
-    c->x86_model = c->x86_mask = 0;    /* So far unknown... */
-    c->x86_vendor_id[0] = '\0'; /* Unset */
-    memset(&c->x86_capability, 0, sizeof c->x86_capability);
-
-    if ( !have_cpuid_p() )
-        panic("Ancient processors not supported\n");
-
-    /* Get vendor name */
-    cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
-          (unsigned int *)&c->x86_vendor_id[0],
-          (unsigned int *)&c->x86_vendor_id[8],
-          (unsigned int *)&c->x86_vendor_id[4]);
-
-    get_cpu_vendor(c);
-               
-    if ( c->cpuid_level == 0 )
-        panic("Decrepit CPUID not supported\n");
-
-    cpuid(0x00000001, &tfms, &junk, &junk,
-          &c->x86_capability[0]);
-    c->x86 = (tfms >> 8) & 15;
-    c->x86_model = (tfms >> 4) & 15;
-    c->x86_mask = tfms & 15;
-
-    /* AMD-defined flags: level 0x80000001 */
-    xlvl = cpuid_eax(0x80000000);
-    if ( (xlvl & 0xffff0000) == 0x80000000 ) {
-        if ( xlvl >= 0x80000001 )
-            c->x86_capability[1] = cpuid_edx(0x80000001);
-    }
-
-    /* Transmeta-defined flags: level 0x80860001 */
-    xlvl = cpuid_eax(0x80860000);
-    if ( (xlvl & 0xffff0000) == 0x80860000 ) {
-        if (  xlvl >= 0x80860001 )
-            c->x86_capability[2] = cpuid_edx(0x80860001);
-    }
-
-    printk("CPU%d: Before vendor init, caps: %08x %08x %08x, vendor = %d\n",
-           smp_processor_id(),
-           c->x86_capability[0],
-           c->x86_capability[1],
-           c->x86_capability[2],
-           c->x86_vendor);
-
-    switch ( c->x86_vendor ) {
-    case X86_VENDOR_INTEL:
-        init_intel(c);
-        break;
-    case X86_VENDOR_AMD:
-        init_amd(c);
-        break;
-    case X86_VENDOR_UNKNOWN:  /* Connectix Virtual PC reports this */
-       break;
-    case X86_VENDOR_CENTAUR:
-        break;
-    default:
-        printk("Unknown CPU identifier (%d): continuing anyway, "
-               "but might fail.\n", c->x86_vendor);
-    }
-       
-    printk("CPU caps: %08x %08x %08x %08x\n",
-           c->x86_capability[0],
-           c->x86_capability[1],
-           c->x86_capability[2],
-           c->x86_capability[3]);
-
-    /*
-     * On SMP, boot_cpu_data holds the common feature set between
-     * all CPUs; so make sure that we indicate which features are
-     * common between the CPUs.  The first time this routine gets
-     * executed, c == &boot_cpu_data.
-     */
-    if ( c != &boot_cpu_data ) {
-        /* AND the already accumulated flags with these */
-        for ( i = 0 ; i < NCAPINTS ; i++ )
-            boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
-    }
-}
-
-void __init print_cpu_info(struct cpuinfo_x86 *c)
-{
-    printk("booted.\n");
-}
-
-unsigned long cpu_initialized;
-void __init cpu_init(void)
-{
-    int nr = smp_processor_id();
-    struct tss_struct *t = &init_tss[nr];
-    char gdt_load[10];
-
-    if ( test_and_set_bit(nr, &cpu_initialized) )
-        panic("CPU#%d already initialized!!!\n", nr);
-    printk("Initializing CPU#%d\n", nr);
-
-    *(unsigned short *)(&gdt_load[0]) = LAST_RESERVED_GDT_BYTE;
-    *(unsigned long  *)(&gdt_load[2]) = GDT_VIRT_START(current);
-    __asm__ __volatile__ ( "lgdt %0" : "=m" (gdt_load) );
-
-    /* No nested task. */
-    __asm__ __volatile__ ( "pushf ; andw $0xbfff,(%"__OP"sp) ; popf" );
-
-    /* Ensure FPU gets initialised for each domain. */
-    stts();
-
-    /* Set up and load the per-CPU TSS and LDT. */
-    t->bitmap = IOBMP_INVALID_OFFSET;
-#if defined(CONFIG_X86_32)
-    t->ss0  = __HYPERVISOR_DS;
-    t->esp0 = get_stack_bottom();
-#elif defined(CONFIG_X86_64)
-    /* Bottom-of-stack must be 16-byte aligned or CPU will force it! :-o */
-    BUG_ON((get_stack_bottom() & 15) != 0);
-    t->rsp0 = get_stack_bottom();
-#endif
-    set_tss_desc(nr,t);
-    load_TR(nr);
-    __asm__ __volatile__ ( "lldt %%ax" : : "a" (0) );
-
-    /* Clear all 6 debug registers. */
-#define CD(register) __asm__ ( "mov %0,%%db" #register : : "r" (0UL) );
-    CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
-#undef CD
-
-    /* Install correct page table. */
-    write_ptbase(current);
-}
-
 int acpi_force;
 char acpi_param[10] = "";
 static void parse_acpi_param(char *s)
@@ -415,6 +166,7 @@ static void __init start_of_day(void)
         virt_to_phys(gdt_table) >> PAGE_SHIFT, 1, PAGE_HYPERVISOR);
 
     /* Process CPU type information. */
+    early_cpu_init();
     identify_cpu(&boot_cpu_data);
     if ( cpu_has_fxsr )
         set_in_cr4(X86_CR4_OSFXSR);
index 5e4f6196ec6af5ee17286919259a0937f7c21752..4f3925e8941d5525b812e989a708c2cbbb8d8eaa 100644 (file)
@@ -7,11 +7,10 @@
 #ifndef __X86_CONFIG_H__
 #define __X86_CONFIG_H__
 
-#define CONFIG_VMX 1
-
 #define CONFIG_X86 1
+#define CONFIG_X86_HT 1
 #define CONFIG_SHADOW 1
-
+#define CONFIG_VMX 1
 #define CONFIG_SMP 1
 #define CONFIG_X86_LOCAL_APIC 1
 #define CONFIG_X86_GOOD_APIC 1
index 58aae2a74e10097040a45ede45622c27fb2d569c..e695162de46f5ac7199895b9b7efbf78490e7dd0 100644 (file)
@@ -4,13 +4,12 @@
  * Defines x86 CPU feature bits
  */
 
-#ifndef __ASM_X86_CPUFEATURE_H
-#define __ASM_X86_CPUFEATURE_H
+#ifndef __ASM_I386_CPUFEATURE_H
+#define __ASM_I386_CPUFEATURE_H
 
-/* Sample usage: CPU_FEATURE_P(cpu.x86_capability, FPU) */
-#define CPU_FEATURE_P(CAP, FEATURE) test_bit(CAP, X86_FEATURE_##FEATURE)
+#include <xen/bitops.h>
 
-#define NCAPINTS       6       /* Currently we have 6 32-bit words worth of info */
+#define NCAPINTS       7       /* N 32-bit words worth of info */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
 #define X86_FEATURE_FPU                (0*32+ 0) /* Onboard FPU */
@@ -48,7 +47,7 @@
 /* Don't duplicate feature flags which are redundant with Intel! */
 #define X86_FEATURE_SYSCALL    (1*32+11) /* SYSCALL/SYSRET */
 #define X86_FEATURE_MP         (1*32+19) /* MP Capable. */
-#define X86_FEATURE_NX         (1*32+20) /* No-Execute Bit. */
+#define X86_FEATURE_NX         (1*32+20) /* Execute Disable */
 #define X86_FEATURE_MMXEXT     (1*32+22) /* AMD MMX extensions */
 #define X86_FEATURE_LM         (1*32+29) /* Long Mode (x86-64) */
 #define X86_FEATURE_3DNOWEXT   (1*32+30) /* AMD 3DNow! extensions */
 #define X86_FEATURE_P4         (3*32+ 7) /* P4 */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
+#define X86_FEATURE_XMM3       (4*32+ 0) /* Streaming SIMD Extensions-3 */
 #define X86_FEATURE_MWAIT      (4*32+ 3) /* Monitor/Mwait support */
+#define X86_FEATURE_DSCPL      (4*32+ 4) /* CPL Qualified Debug Store */
 #define X86_FEATURE_VMXE       (4*32+ 5) /* Virtual Machine Extensions */
 #define X86_FEATURE_EST                (4*32+ 7) /* Enhanced SpeedStep */
+#define X86_FEATURE_TM2                (4*32+ 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_CID                (4*32+10) /* Context ID */
+#define X86_FEATURE_CX16        (4*32+13) /* CMPXCHG16B */
+#define X86_FEATURE_XTPR       (4*32+14) /* Send Task Priority Messages */
 
 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
 #define X86_FEATURE_XSTORE     (5*32+ 2) /* on-CPU RNG present (xstore insn) */
+#define X86_FEATURE_XSTORE_EN  (5*32+ 3) /* on-CPU RNG enabled */
+#define X86_FEATURE_XCRYPT     (5*32+ 6) /* on-CPU crypto (xcrypt insn) */
+#define X86_FEATURE_XCRYPT_EN  (5*32+ 7) /* on-CPU crypto enabled */
 
+/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
+#define X86_FEATURE_LAHF_LM    (6*32+ 0) /* LAHF/SAHF in long mode */
+#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
 
 #define cpu_has(c, bit)                test_bit(bit, (c)->x86_capability)
 #define boot_cpu_has(bit)      test_bit(bit, boot_cpu_data.x86_capability)
 #define cpu_has_tsc            boot_cpu_has(X86_FEATURE_TSC)
 #define cpu_has_pae            boot_cpu_has(X86_FEATURE_PAE)
 #define cpu_has_pge            boot_cpu_has(X86_FEATURE_PGE)
-#define cpu_has_sse2           boot_cpu_has(X86_FEATURE_XMM2)
 #define cpu_has_apic           boot_cpu_has(X86_FEATURE_APIC)
 #define cpu_has_sep            boot_cpu_has(X86_FEATURE_SEP)
 #define cpu_has_mtrr           boot_cpu_has(X86_FEATURE_MTRR)
 #define cpu_has_mmx            boot_cpu_has(X86_FEATURE_MMX)
 #define cpu_has_fxsr           boot_cpu_has(X86_FEATURE_FXSR)
 #define cpu_has_xmm            boot_cpu_has(X86_FEATURE_XMM)
+#define cpu_has_xmm2           boot_cpu_has(X86_FEATURE_XMM2)
+#define cpu_has_xmm3           boot_cpu_has(X86_FEATURE_XMM3)
 #define cpu_has_ht             boot_cpu_has(X86_FEATURE_HT)
 #define cpu_has_mp             boot_cpu_has(X86_FEATURE_MP)
 #define cpu_has_nx             boot_cpu_has(X86_FEATURE_NX)
 #define cpu_has_cyrix_arr      boot_cpu_has(X86_FEATURE_CYRIX_ARR)
 #define cpu_has_centaur_mcr    boot_cpu_has(X86_FEATURE_CENTAUR_MCR)
 #define cpu_has_xstore         boot_cpu_has(X86_FEATURE_XSTORE)
+#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN)
+#define cpu_has_xcrypt         boot_cpu_has(X86_FEATURE_XCRYPT)
+#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN)
 
-#endif /* __ASM_X86_CPUFEATURE_H */
+#endif /* __ASM_I386_CPUFEATURE_H */
+
+/* 
+ * Local Variables:
+ * mode:c
+ * comment-column:42
+ * End:
+ */
index e576613c1338142f6a8edae69f1bdd0da470b58b..35163029e51d18e24dbf8d0f8d603524c2e805b9 100644 (file)
 #define MSR_IA32_PLATFORM_ID           0x17
 #define MSR_IA32_EBL_CR_POWERON                0x2a
 
+#define MSR_IA32_APICBASE              0x1b
+#define MSR_IA32_APICBASE_BSP          (1<<8)
+#define MSR_IA32_APICBASE_ENABLE       (1<<11)
+#define MSR_IA32_APICBASE_BASE         (0xfffff<<12)
+
+#define MSR_IA32_UCODE_WRITE           0x79
+#define MSR_IA32_UCODE_REV             0x8b
+
+#define MSR_P6_PERFCTR0      0xc1
+#define MSR_P6_PERFCTR1      0xc2
+
 /* MSRs & bits used for VMX enabling */
 #define MSR_IA32_VMX_BASIC_MSR                  0x480
 #define IA32_FEATURE_CONTROL_MSR                0x3a
 /* Intel MSRs. Some also available on other CPUs */
 #define MSR_IA32_PLATFORM_ID   0x17
 
-#define MSR_IA32_PERFCTR0      0xc1
-#define MSR_IA32_PERFCTR1      0xc2
-
 #define MSR_MTRRcap            0x0fe
 #define MSR_IA32_BBL_CR_CTL        0x119
 
 #define MSR_IA32_MCG_STATUS        0x17a
 #define MSR_IA32_MCG_CTL       0x17b
 
-#define MSR_IA32_EVNTSEL0      0x186
-#define MSR_IA32_EVNTSEL1      0x187
-
 #define MSR_MTRRfix64K_00000   0x250
 #define MSR_MTRRfix16K_80000   0x258
 #define MSR_MTRRfix16K_A0000   0x259
 
 #define MSR_IA32_DS_AREA       0x600
 
-#define MSR_IA32_APICBASE              0x1b
-#define MSR_IA32_APICBASE_BSP          (1<<8)
-#define MSR_IA32_APICBASE_ENABLE       (1<<11)
-#define MSR_IA32_APICBASE_BASE         (0xfffff<<12)
-
-#define MSR_IA32_UCODE_WRITE           0x79
-#define MSR_IA32_UCODE_REV             0x8b
-
 #define MSR_IA32_BBL_CR_CTL            0x119
 
 #define MSR_IA32_MCG_CAP               0x179
 /* VIA Cyrix defined MSRs*/
 #define MSR_VIA_FCR                    0x1107
 #define MSR_VIA_LONGHAUL               0x110a
+#define MSR_VIA_RNG                    0x110b
 #define MSR_VIA_BCR2                   0x1147
 
 /* Transmeta defined MSRs */
index 94b82d4ba32c8fb63f60a34ae85fcb6d6ba10528..6fc44ade5d6d8613e1b1ab34ff732c74e92a69c2 100644 (file)
@@ -26,8 +26,7 @@
 #define X86_VENDOR_RISE 6
 #define X86_VENDOR_TRANSMETA 7
 #define X86_VENDOR_NSC 8
-#define X86_VENDOR_SIS 9
-#define X86_VENDOR_NUM 10
+#define X86_VENDOR_NUM 9
 #define X86_VENDOR_UNKNOWN 0xff
 
 /*
@@ -146,23 +145,26 @@ struct exec_domain;
   ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
 #endif
 
-/*
- *  CPU type and hardware bug flags. Kept separately for each CPU.
- *  Members of this structure are referenced in head.S, so think twice
- *  before touching them. [mj]
- */
-
 struct cpuinfo_x86 {
-    __u8    x86;            /* CPU family */
-    __u8    x86_vendor;     /* CPU vendor */
-    __u8    x86_model;
-    __u8    x86_mask;
-    int     cpuid_level;    /* Maximum supported CPUID level, -1=no CPUID */
-    __u32   x86_capability[NCAPINTS];
-    char    x86_vendor_id[16];
-    int     x86_cache_size;  /* in KB - for CPUS that support this call  */
-    int            x86_clflush_size;
-    int            x86_tlbsize;     /* number of 4K pages in DTLB/ITLB combined */
+       __u8    x86;            /* CPU family */
+       __u8    x86_vendor;     /* CPU vendor */
+       __u8    x86_model;
+       __u8    x86_mask;
+       char    wp_works_ok;    /* It doesn't on 386's */
+       char    hlt_works_ok;   /* Problems on some 486Dx4's and old 386's */
+       char    hard_math;
+       char    rfu;
+	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
+       unsigned long   x86_capability[NCAPINTS];
+       char    x86_vendor_id[16];
+       char    x86_model_id[64];
+       int     x86_cache_size;  /* in KB - valid for CPUS which support this
+                                   call  */
+       int     x86_cache_alignment;    /* In bytes */
+       int     fdiv_bug;
+       int     f00f_bug;
+       int     coma_bug;
+       unsigned char x86_num_cores;
 } __cacheline_aligned;
 
 /*
@@ -179,15 +181,23 @@ extern struct cpuinfo_x86 cpu_data[];
 #define current_cpu_data boot_cpu_data
 #endif
 
-extern  int phys_proc_id[NR_CPUS];
-extern char ignore_irq13;
+extern int phys_proc_id[NR_CPUS];
 
 extern void identify_cpu(struct cpuinfo_x86 *);
 extern void print_cpu_info(struct cpuinfo_x86 *);
+extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern void dodgy_tsc(void);
 
+#ifdef CONFIG_X86_HT
+extern void detect_ht(struct cpuinfo_x86 *c);
+#else
+static inline void detect_ht(struct cpuinfo_x86 *c) {}
+#endif
+
 /*
  * Generic CPUID function
+ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
+ * resulting in stale register contents being returned.
  */
 static inline void cpuid(
     int op, unsigned int *eax, unsigned int *ebx,
@@ -195,10 +205,10 @@ static inline void cpuid(
 {
     __asm__("cpuid"
             : "=a" (*eax),
-            "=b" (*ebx),
-            "=c" (*ecx),
-            "=d" (*edx)
-            : "0" (op));
+              "=b" (*ebx),
+              "=c" (*ecx),
+              "=d" (*edx)
+            : "0" (op), "2" (0));
 }
 
 /*
@@ -327,6 +337,23 @@ static inline void clear_in_cr4 (unsigned long mask)
        outb((data), 0x23); \
 } while (0)
 
+static inline void __monitor(const void *eax, unsigned long ecx,
+               unsigned long edx)
+{
+       /* "monitor %eax,%ecx,%edx;" */
+       asm volatile(
+               ".byte 0x0f,0x01,0xc8;"
+               : :"a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+       /* "mwait %eax,%ecx;" */
+       asm volatile(
+               ".byte 0x0f,0x01,0xc9;"
+               : :"a" (eax), "c" (ecx));
+}
+
 #define IOBMP_BYTES             8192
 #define IOBMP_INVALID_OFFSET    0x8000